diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 2a4a39c854..d3890a4c04 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -39,6 +39,7 @@ Follow conventional commits: `: ` - [ ] I've tested my changes locally - [ ] I've followed the code principles (SOLID, DRY, KISS) - [ ] My PR is small and focused (< 400 lines ideally) +- [ ] **(Python only)** All file operations specify `encoding="utf-8"` for text files ## CI/Testing Requirements diff --git a/.github/workflows/beta-release.yml b/.github/workflows/beta-release.yml index f19d3e607a..2802774fbd 100644 --- a/.github/workflows/beta-release.yml +++ b/.github/workflows/beta-release.yml @@ -97,13 +97,21 @@ jobs: - name: Install Rust toolchain (for building native Python packages) uses: dtolnay/rust-toolchain@stable + - name: Cache pip wheel cache (for compiled packages like real_ladybug) + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8-rust + key: python-bundle-${{ runner.os }}-x64-3.12.8-rust-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8-rust- - name: Build application run: cd apps/frontend && npm run build @@ -181,13 +189,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-arm64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-arm64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-arm64-3.12.8 + key: python-bundle-${{ runner.os }}-arm64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-arm64- + python-bundle-${{ runner.os }}-arm64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -265,13 +281,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~\AppData\Local\pip\Cache + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -335,13 +359,21 @@ jobs: flatpak install -y --user flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 flatpak install -y --user flathub org.electronjs.Electron2.BaseApp//25.08 + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: 
python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml index d50940c188..ac10837861 100644 --- a/.github/workflows/prepare-release.yml +++ b/.github/workflows/prepare-release.yml @@ -1,8 +1,10 @@ name: Prepare Release # Triggers when code is pushed to main (e.g., merging develop โ†’ main) -# If package.json version is newer than the latest tag, creates a new tag -# which then triggers the release.yml workflow +# If package.json version is newer than the latest tag: +# 1. Validates CHANGELOG.md has an entry for this version (FAILS if missing) +# 2. Extracts release notes from CHANGELOG.md +# 3. Creates a new tag which triggers release.yml on: push: @@ -67,8 +69,122 @@ jobs: echo "โญ๏ธ No release needed (package version not newer than latest tag)" fi - - name: Create and push tag + # CRITICAL: Validate CHANGELOG.md has entry for this version BEFORE creating tag + - name: Validate and extract changelog if: steps.check.outputs.should_release == 'true' + id: changelog + run: | + VERSION="${{ steps.check.outputs.new_version }}" + CHANGELOG_FILE="CHANGELOG.md" + + echo "๐Ÿ” Validating CHANGELOG.md for version $VERSION..." + + if [ ! -f "$CHANGELOG_FILE" ]; then + echo "::error::CHANGELOG.md not found! Please create CHANGELOG.md with release notes." + exit 1 + fi + + # Extract changelog section for this version + # Looks for "## X.Y.Z" header and captures until next "## " or "---" or end + CHANGELOG_CONTENT=$(awk -v ver="$VERSION" ' + BEGIN { found=0; content="" } + /^## / { + if (found) exit + # Match version at start of header (e.g., "## 2.7.3 -" or "## 2.7.3") + if ($2 == ver || $2 ~ "^"ver"[[:space:]]*-") { + found=1 + # Skip the header line itself, we will add our own + next + } + } + /^---$/ { if (found) exit } + found { content = content $0 "\n" } + END { + if (!found) { + print "NOT_FOUND" + exit 1 + } + # Trim leading/trailing whitespace + gsub(/^[[:space:]]+|[[:space:]]+$/, "", content) + print content + } + ' "$CHANGELOG_FILE") + + if [ "$CHANGELOG_CONTENT" = "NOT_FOUND" ] || [ -z "$CHANGELOG_CONTENT" ]; then + echo "" + echo "::error::โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" + echo "::error:: CHANGELOG VALIDATION FAILED" + echo "::error::โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" + echo "::error::" + echo "::error:: Version $VERSION not found in CHANGELOG.md!" 
+ echo "::error::" + echo "::error:: Before releasing, please update CHANGELOG.md with an entry like:" + echo "::error::" + echo "::error:: ## $VERSION - Your Release Title" + echo "::error::" + echo "::error:: ### โœจ New Features" + echo "::error:: - Feature description" + echo "::error::" + echo "::error:: ### ๐Ÿ› Bug Fixes" + echo "::error:: - Fix description" + echo "::error::" + echo "::error::โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" + echo "" + + # Also add to job summary for visibility + echo "## โŒ Release Blocked: Missing Changelog" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Version **$VERSION** was not found in CHANGELOG.md." >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### How to fix:" >> $GITHUB_STEP_SUMMARY + echo "1. Update CHANGELOG.md with release notes for version $VERSION" >> $GITHUB_STEP_SUMMARY + echo "2. Commit and push the changes" >> $GITHUB_STEP_SUMMARY + echo "3. The release will automatically retry" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### Expected format:" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`markdown" >> $GITHUB_STEP_SUMMARY + echo "## $VERSION - Release Title" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### โœจ New Features" >> $GITHUB_STEP_SUMMARY + echo "- Feature description" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### ๐Ÿ› Bug Fixes" >> $GITHUB_STEP_SUMMARY + echo "- Fix description" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY + + exit 1 + fi + + echo "โœ… Found changelog entry for version $VERSION" + echo "" + echo "--- Extracted Release Notes ---" + echo "$CHANGELOG_CONTENT" + echo "--- End Release Notes ---" + + # Save changelog to file for artifact upload + echo "$CHANGELOG_CONTENT" > changelog-extract.md + + # Also save to output (for short changelogs) + # Using heredoc for multiline output + { + echo "content<> $GITHUB_OUTPUT + + echo "changelog_valid=true" >> $GITHUB_OUTPUT + + # Upload changelog as artifact for release.yml to use + - name: Upload changelog artifact + if: steps.check.outputs.should_release == 'true' && steps.changelog.outputs.changelog_valid == 'true' + uses: actions/upload-artifact@v4 + with: + name: changelog-${{ steps.check.outputs.new_version }} + path: changelog-extract.md + retention-days: 1 + + - name: Create and push tag + if: steps.check.outputs.should_release == 'true' && steps.changelog.outputs.changelog_valid == 'true' run: | VERSION="${{ steps.check.outputs.new_version }}" TAG="v$VERSION" @@ -85,17 +201,19 @@ jobs: - name: Summary run: | - if [ "${{ steps.check.outputs.should_release }}" = "true" ]; then + if [ "${{ steps.check.outputs.should_release }}" = "true" ] && [ "${{ steps.changelog.outputs.changelog_valid }}" = "true" ]; then echo "## ๐Ÿš€ Release Triggered" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Version:** v${{ steps.check.outputs.new_version }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY + echo "โœ… Changelog validated and extracted from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY echo "The release workflow has been triggered and will:" >> $GITHUB_STEP_SUMMARY echo "1. Build binaries for all platforms" >> $GITHUB_STEP_SUMMARY - echo "2. Generate changelog from PRs" >> $GITHUB_STEP_SUMMARY + echo "2. 
Use changelog from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY echo "3. Create GitHub release" >> $GITHUB_STEP_SUMMARY echo "4. Update README with new version" >> $GITHUB_STEP_SUMMARY - else + elif [ "${{ steps.check.outputs.should_release }}" = "false" ]; then echo "## โญ๏ธ No Release Needed" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Package version:** ${{ steps.package.outputs.version }}" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c6b6ddc99c..36f4e13877 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,13 +46,21 @@ jobs: - name: Install Rust toolchain (for building native Python packages) uses: dtolnay/rust-toolchain@stable + - name: Cache pip wheel cache (for compiled packages like real_ladybug) + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8-rust + key: python-bundle-${{ runner.os }}-x64-3.12.8-rust-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8-rust- - name: Build application run: cd apps/frontend && npm run build @@ -93,6 +101,8 @@ jobs: path: | apps/frontend/dist/*.dmg apps/frontend/dist/*.zip + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap # Apple Silicon build on ARM64 runner for native compilation build-macos-arm64: @@ -123,13 +133,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/Library/Caches/pip + key: pip-wheel-${{ runner.os }}-arm64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-arm64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-arm64-3.12.8 + key: python-bundle-${{ runner.os }}-arm64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-arm64- + python-bundle-${{ runner.os }}-arm64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -170,6 +188,8 @@ jobs: path: | apps/frontend/dist/*.dmg apps/frontend/dist/*.zip + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap build-windows: runs-on: windows-latest @@ -200,13 +220,21 @@ jobs: - name: Install dependencies run: cd apps/frontend && npm ci + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~\AppData\Local\pip\Cache + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -224,6 +252,8 @@ jobs: name: windows-builds path: | apps/frontend/dist/*.exe + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap build-linux: runs-on: ubuntu-latest @@ -261,13 +291,21 @@ jobs: flatpak install -y 
--user flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 flatpak install -y --user flathub org.electronjs.Electron2.BaseApp//25.08 + - name: Cache pip wheel cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }} + restore-keys: | + pip-wheel-${{ runner.os }}-x64- + - name: Cache bundled Python uses: actions/cache@v4 with: path: apps/frontend/python-runtime - key: python-bundle-${{ runner.os }}-x64-3.12.8 + key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }} restore-keys: | - python-bundle-${{ runner.os }}-x64- + python-bundle-${{ runner.os }}-x64-3.12.8- - name: Build application run: cd apps/frontend && npm run build @@ -285,6 +323,8 @@ jobs: apps/frontend/dist/*.AppImage apps/frontend/dist/*.deb apps/frontend/dist/*.flatpak + apps/frontend/dist/*.yml + apps/frontend/dist/*.blockmap create-release: needs: [build-macos-intel, build-macos-arm64, build-windows, build-linux] @@ -304,16 +344,30 @@ jobs: - name: Flatten and validate artifacts run: | mkdir -p release-assets - find dist -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) -exec cp {} release-assets/ \; + find dist -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" -o -name "*.yml" -o -name "*.blockmap" \) -exec cp {} release-assets/ \; + + # Validate that installer files exist (not just manifests) + installer_count=$(find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) | wc -l) + if [ "$installer_count" -eq 0 ]; then + echo "::error::No installer artifacts found! Expected .dmg, .zip, .exe, .AppImage, .deb, or .flatpak files." + exit 1 + fi + + echo "Found $installer_count installer(s):" + find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) -exec basename {} \; - # Validate that at least one artifact was copied - artifact_count=$(find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) | wc -l) - if [ "$artifact_count" -eq 0 ]; then - echo "::error::No build artifacts found! Expected .dmg, .zip, .exe, .AppImage, .deb, or .flatpak files." + # Validate that electron-updater manifest files are present (required for auto-updates) + yml_count=$(find release-assets -type f -name "*.yml" | wc -l) + if [ "$yml_count" -eq 0 ]; then + echo "::error::No update manifest (.yml) files found! Auto-update architecture detection will not work." 
exit 1 fi - echo "Found $artifact_count artifact(s):" + echo "Found $yml_count manifest file(s):" + find release-assets -type f -name "*.yml" -exec basename {} \; + + echo "" + echo "All release assets:" ls -la release-assets/ - name: Generate checksums @@ -473,23 +527,78 @@ jobs: cat release-assets/checksums.sha256 >> $GITHUB_STEP_SUMMARY echo "\`\`\`" >> $GITHUB_STEP_SUMMARY - - name: Generate changelog - if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }} + - name: Extract changelog from CHANGELOG.md + if: ${{ github.event_name == 'push' }} id: changelog - uses: release-drafter/release-drafter@v6 - with: - config-name: release-drafter.yml - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Extract version from tag (v2.7.2 -> 2.7.2) + VERSION=${GITHUB_REF_NAME#v} + CHANGELOG_FILE="CHANGELOG.md" + + echo "๐Ÿ“‹ Extracting release notes for version $VERSION from CHANGELOG.md..." + + if [ ! -f "$CHANGELOG_FILE" ]; then + echo "::warning::CHANGELOG.md not found, using minimal release notes" + echo "body=Release v$VERSION" >> $GITHUB_OUTPUT + exit 0 + fi + + # Extract changelog section for this version + # Looks for "## X.Y.Z" header and captures until next "## " or "---" + CHANGELOG_CONTENT=$(awk -v ver="$VERSION" ' + BEGIN { found=0; content="" } + /^## / { + if (found) exit + # Match version at start of header (e.g., "## 2.7.3 -" or "## 2.7.3") + if ($2 == ver || $2 ~ "^"ver"[[:space:]]*-") { + found=1 + next + } + } + /^---$/ { if (found) exit } + found { content = content $0 "\n" } + END { + if (!found) { + print "NOT_FOUND" + exit 0 + } + # Trim leading/trailing whitespace + gsub(/^[[:space:]]+|[[:space:]]+$/, "", content) + print content + } + ' "$CHANGELOG_FILE") + + if [ "$CHANGELOG_CONTENT" = "NOT_FOUND" ] || [ -z "$CHANGELOG_CONTENT" ]; then + echo "::warning::Version $VERSION not found in CHANGELOG.md, using minimal release notes" + CHANGELOG_CONTENT="Release v$VERSION + +See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details." 
+ fi + + echo "โœ… Extracted changelog content" + + # Save to file first (more reliable for multiline) + echo "$CHANGELOG_CONTENT" > changelog-body.md + + # Use file-based output for multiline content + { + echo "body<> $GITHUB_OUTPUT - name: Create Release - if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }} + if: ${{ github.event_name == 'push' }} uses: softprops/action-gh-release@v2 with: body: | ${{ steps.changelog.outputs.body }} + --- + ${{ steps.virustotal.outputs.vt_results }} + + **Full Changelog**: https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md files: release-assets/* draft: false prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }} @@ -500,7 +609,8 @@ jobs: update-readme: needs: [create-release] runs-on: ubuntu-latest - if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }} + # Only update README on actual releases (tag push), not dry runs + if: ${{ github.event_name == 'push' }} permissions: contents: write steps: diff --git a/.gitignore b/.gitignore index 7f53e4c59a..e90fde6a14 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ Desktop.ini .env .env.* !.env.example +/config.json *.pem *.key *.crt @@ -163,3 +164,4 @@ _bmad-output/ .claude/ /docs OPUS_ANALYSIS_AND_IDEAS.md +/.github/agents diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f67b77c813..0f996bccc2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,6 @@ repos: # Version sync - propagate root package.json version to all files + # NOTE: Skip in worktrees - version sync modifies root files which don't exist in worktree - repo: local hooks: - id: version-sync @@ -8,6 +9,12 @@ repos: args: - -c - | + # Skip in worktrees - .git is a file pointing to main repo, not a directory + # Version sync modifies root-level files that may not exist in worktree context + if [ -f ".git" ]; then + echo "Skipping version-sync in worktree (root files not accessible)" + exit 0 + fi VERSION=$(node -p "require('./package.json').version") if [ -n "$VERSION" ]; then @@ -81,6 +88,7 @@ repos: # Python tests (apps/backend/) - skip slow/integration tests for pre-commit speed # Tests to skip: graphiti (external deps), merge_file_tracker/service_orchestrator/worktree/workspace (Windows path/git issues) + # NOTE: Skip this hook in worktrees (where .git is a file, not a directory) - repo: local hooks: - id: pytest @@ -89,6 +97,12 @@ repos: args: - -c - | + # Skip in worktrees - .git is a file pointing to main repo, not a directory + # This prevents path resolution issues with ../../tests/ in worktree context + if [ -f ".git" ]; then + echo "Skipping pytest in worktree (path resolution would fail)" + exit 0 + fi cd apps/backend if [ -f ".venv/bin/pytest" ]; then PYTEST_CMD=".venv/bin/pytest" @@ -113,18 +127,37 @@ repos: pass_filenames: false # Frontend linting (apps/frontend/) + # NOTE: These hooks check for worktree context to avoid npm/node_modules issues - repo: local hooks: - id: eslint name: ESLint - entry: bash -c 'cd apps/frontend && npm run lint' + entry: bash + args: + - -c + - | + # Skip in worktrees if node_modules doesn't exist (dependencies not installed) + if [ -f ".git" ] && [ ! 
-d "apps/frontend/node_modules" ]; then + echo "Skipping ESLint in worktree (node_modules not found)" + exit 0 + fi + cd apps/frontend && npm run lint language: system files: ^apps/frontend/.*\.(ts|tsx|js|jsx)$ pass_filenames: false - id: typecheck name: TypeScript Check - entry: bash -c 'cd apps/frontend && npm run typecheck' + entry: bash + args: + - -c + - | + # Skip in worktrees if node_modules doesn't exist (dependencies not installed) + if [ -f ".git" ] && [ ! -d "apps/frontend/node_modules" ]; then + echo "Skipping TypeScript check in worktree (node_modules not found)" + exit 0 + fi + cd apps/frontend && npm run typecheck language: system files: ^apps/frontend/.*\.(ts|tsx)$ pass_filenames: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fb1a26e82..22c43eb8da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,283 @@ +## 2.7.2 - Stability & Performance Enhancements + +### โœจ New Features + +- Added refresh button to Kanban board for manually reloading tasks + +- Terminal dropdown with built-in and external options in task review + +- Centralized CLI tool path management with customizable settings + +- Files tab in task details panel for better file organization + +- Enhanced PR review page with filtering capabilities + +- GitLab integration support + +- Automated PR review with follow-up support and structured outputs + +- UI scale feature with 75-200% range for accessibility + +- Python 3.12 bundled with packaged Electron app + +- OpenRouter support as LLM/embedding provider + +- Internationalization (i18n) system for multi-language support + +- Flatpak packaging support for Linux + +- Path-aware AI merge resolution with device code streaming + +### ๐Ÿ› ๏ธ Improvements + +- Improved terminal experience with persistent state when switching projects + +- Enhanced PR review with structured outputs and fork support + +- Better UX for display and scaling changes + +- Convert synchronous I/O to async operations in worktree handlers + +- Enhanced logs for commit linting stage + +- Remove top navigation bars for cleaner UI + +- Enhanced PR detail area visual design + +- Improved CLI tool detection with more language support + +- Added iOS/Swift project detection + +- Optimize performance by removing projectTabs from useEffect dependencies + +- Improved Python detection and version validation for compatibility + +### ๐Ÿ› Bug Fixes + +- Fixed CI Python setup and PR status gate checks + +- Fixed cross-platform CLI path detection and clearing in settings + +- Preserve original task description after spec creation + +- Fixed learning loop to retrieve patterns and gotchas from memory + +- Resolved frontend lag and updated dependencies + +- Fixed Content-Security-Policy to allow external HTTPS images + +- Fixed PR review isolation by using temporary worktree + +- Fixed Homebrew Python detection to prefer versioned Python over system python3 + +- Added support for Bun 1.2.0+ lock file format detection + +- Fixed infinite re-render loop in task selection + +- Fixed infinite loop in task detail merge preview loading + +- Resolved Windows EINVAL error when opening worktree in VS Code + +- Fixed fallback to prevent tasks stuck in ai_review status + +- Fixed SDK permissions to include spec_dir + +- Added --base-branch argument support to spec_runner + +- Allow Windows to run CC PR Reviewer + +- Fixed model selection to respect task_metadata.json + +- Improved GitHub PR review by passing repo parameter explicitly + +- Fixed electron-log imports with .js extension + +- Fixed Swift detection 
order in project analyzer + +- Prevent TaskEditDialog from unmounting when opened + +- Fixed subprocess handling for Python paths with spaces + +- Fixed file system race conditions and unused variables in security scanning + +- Resolved Python detection and backend packaging issues + +- Fixed version-specific links in README and pre-commit hooks + +- Fixed task status persistence reverting on refresh + +- Proper semver comparison for pre-release versions + +- Use virtual environment Python for all services to fix dotenv errors + +- Fixed explicit Windows System32 tar path for builds + +- Added augmented PATH environment to all GitHub CLI calls + +- Use PowerShell for tar extraction on Windows + +- Added --force-local flag to tar on Windows + +- Stop tracking spec files in git + +- Fixed GitHub API calls with explicit GET method for comment fetches + +- Support archiving tasks across all worktree locations + +- Validated backend source path before using it + +- Resolved spawn Python ENOENT error on Linux + +- Fixed CodeQL alerts for uncontrolled command line + +- Resolved GitHub follow-up review API issues + +- Fixed relative path normalization to POSIX format + +- Accepted bug_fix workflow_type alias during planning + +- Added global spec numbering lock to prevent collisions + +- Fixed ideation status sync + +- Stopped running process when task status changes away from in_progress + +- Removed legacy path from auto-claude source detection + +- Resolved Python environment race condition + +--- + +## What's Changed + +- fix(ci): add Python setup to beta-release and fix PR status gate checks (#565) by @Andy in c2148bb9 +- fix: detect and clear cross-platform CLI paths in settings (#535) by @Andy in 29e45505 +- fix(ui): preserve original task description after spec creation (#536) by @Andy in 7990dcb4 +- fix(memory): fix learning loop to retrieve patterns and gotchas (#530) by @Andy in f58c2578 +- fix: resolve frontend lag and update dependencies (#526) by @Andy in 30f7951a +- feat(kanban): add refresh button to manually reload tasks (#548) by @Adryan Serage in 252242f9 +- fix(csp): allow external HTTPS images in Content-Security-Policy (#549) by @Michael Ludlow in 3db02c5d +- fix(pr-review): use temporary worktree for PR review isolation (#532) by @Andy in 344ec65e +- fix: prefer versioned Homebrew Python over system python3 (#494) by @Navid in 8d58dd6f +- fix(detection): support bun.lock text format for Bun 1.2.0+ (#525) by @Andy in 4da8cd66 +- chore: bump version to 2.7.2-beta.12 (#460) by @Andy in 8e5c11ac +- Fix/windows issues (#471) by @Andy in 72106109 +- fix(ci): add Rust toolchain for Intel Mac builds (#459) by @Andy in 52a4fcc6 +- fix: create spec.md during roadmap-to-task conversion (#446) by @Mulaveesala Pranaveswar in fb6b7fc6 +- fix(pr-review): treat LOW-only findings as ready to merge (#455) by @Andy in 0f9c5b84 +- Fix/2.7.2 beta12 (#424) by @Andy in 5d8ede23 +- feat: remove top bars (#386) by @Vinรญcius Santos in da31b687 +- fix: prevent infinite re-render loop in task selection useEffect (#442) by @Abe Diaz in 2effa535 +- fix: accept Python 3.12+ in install-backend.js (#443) by @Abe Diaz in c15bb311 +- fix: infinite loop in useTaskDetail merge preview loading (#444) by @Abe Diaz in 203a970a +- fix(windows): resolve EINVAL error when opening worktree in VS Code (#434) by @Vinรญcius Santos in 3c0708b7 +- feat(frontend): Add Files tab to task details panel (#430) by @Mitsu in 666794b5 +- refactor: remove deprecated TaskDetailPanel component (#432) by @Mitsu in ac8dfcac +- 
fix(ui): add fallback to prevent tasks stuck in ai_review status (#397) by @Michael Ludlow in 798ca79d +- feat: Enhance the look of the PR Detail area (#427) by @Alex in bdb01549 +- ci: remove conventional commits PR title validation workflow by @AndyMik90 in 515b73b5 +- fix(client): add spec_dir to SDK permissions (#429) by @Mitsu in 88c76059 +- fix(spec_runner): add --base-branch argument support (#428) by @Mitsu in 62a75515 +- feat: enhance pr review page to include PRs filters (#423) by @Alex in 717fba04 +- feat: add gitlab integration (#254) by @Mitsu in 0a571d3a +- fix: Allow windows to run CC PR Reviewer (#406) by @Alex in 2f662469 +- fix(model): respect task_metadata.json model selection (#415) by @Andy in e7e6b521 +- feat(build): add Flatpak packaging support for Linux (#404) by @Mitsu in 230de5fc +- fix(github): pass repo parameter to GHClient for explicit PR resolution (#413) by @Andy in 4bdf7a0c +- chore(ci): remove redundant CLA GitHub Action workflow by @AndyMik90 in a39ea49d +- fix(frontend): add .js extension to electron-log/main imports by @AndyMik90 in 9aef0dd0 +- fix: 2.7.2 bug fixes and improvements (#388) by @Andy in 05131217 +- fix(analyzer): move Swift detection before Ruby detection (#401) by @Michael Ludlow in 321c9712 +- fix(ui): prevent TaskEditDialog from unmounting when opened (#395) by @Michael Ludlow in 98b12ed8 +- fix: improve CLI tool detection and add Claude CLI path settings (#393) by @Joe in aaa83131 +- feat(analyzer): add iOS/Swift project detection (#389) by @Michael Ludlow in 68548e33 +- fix(github): improve PR review with structured outputs and fork support (#363) by @Andy in 7751588e +- fix(ideation): update progress calculation to include just-completed ideation type (#381) by @Illia Filippov in 8b4ce58c +- Fixes failing spec - "gh CLI Check Handler - should return installed: true when gh CLI is found" (#370) by @Ian in bc220645 +- fix: Memory Status card respects configured embedding provider (#336) (#373) by @Michael Ludlow in db0cbea3 +- fix: fixed version-specific links in readme and pre-commit hook that updates them (#378) by @Ian in 0ca2e3f6 +- docs: add security research documentation (#361) by @Brian in 2d3b7fb4 +- fix/Improving UX for Display/Scaling Changes (#332) by @Kevin Rajan in 9bbdef09 +- fix(perf): remove projectTabs from useEffect deps to fix re-render loop (#362) by @Michael Ludlow in 753dc8bb +- fix(security): invalidate profile cache when file is created/modified (#355) by @Michael Ludlow in 20f20fa3 +- fix(subprocess): handle Python paths with spaces (#352) by @Michael Ludlow in eabe7c7d +- fix: Resolve pre-commit hook failures with version sync, pytest path, ruff version, and broken quality-dco workflow (#334) by @Ian in 1fa7a9c7 +- fix(terminal): preserve terminal state when switching projects (#358) by @Andy in 7881b2d1 +- fix(analyzer): add C#/Java/Swift/Kotlin project files to security hash (#351) by @Michael Ludlow in 4e71361b +- fix: make backend tests pass on Windows (#282) by @Oluwatosin Oyeladun in 4dcc5afa +- fix(ui): close parent modal when Edit dialog opens (#354) by @Michael Ludlow in e9782db0 +- chore: bump version to 2.7.2-beta.10 by @AndyMik90 in 40d04d7c +- feat: add terminal dropdown with inbuilt and external options in task review (#347) by @JoshuaRileyDev in fef07c95 +- refactor: remove deprecated code across backend and frontend (#348) by @Mitsu in 9d43abed +- feat: centralize CLI tool path management (#341) by @HSSAINI Saad in d51f4562 +- refactor(components): remove deprecated TaskDetailPanel re-export 
(#344) by @Mitsu in 787667e9 +- chore: Refactor/kanban realtime status sync (#249) by @souky-byte in 9734b70b +- refactor(settings): remove deprecated ProjectSettings modal and hooks (#343) by @Mitsu in fec6b9f3 +- perf: convert synchronous I/O to async operations in worktree handlers (#337) by @JoshuaRileyDev in d3a63b09 +- feat: bump version (#329) by @Alex in 50e3111a +- fix(ci): remove version bump to fix branch protection conflict (#325) by @Michael Ludlow in 8a80b1d5 +- fix(tasks): sync status to worktree implementation plan to prevent reset (#243) (#323) by @Alex in cb6b2165 +- fix(ci): add auto-updater manifest files and version auto-update (#317) by @Michael Ludlow in 661e47c3 +- fix(project): fix task status persistence reverting on refresh (#246) (#318) by @Michael Ludlow in e80ef79d +- fix(updater): proper semver comparison for pre-release versions (#313) by @Michael Ludlow in e1b0f743 +- fix(python): use venv Python for all services to fix dotenv errors (#311) by @Alex in 92c6f278 +- chore(ci): cancel in-progress runs (#302) by @Oluwatosin Oyeladun in 1c142273 +- fix(build): use explicit Windows System32 tar path (#308) by @Andy in c0a02a45 +- fix(github): add augmented PATH env to all gh CLI calls by @AndyMik90 in 086429cb +- fix(build): use PowerShell for tar extraction on Windows by @AndyMik90 in d9fb8f29 +- fix(build): add --force-local flag to tar on Windows (#303) by @Andy in d0b0b3df +- fix: stop tracking spec files in git (#295) by @Andy in 937a60f8 +- Fix/2.7.2 fixes (#300) by @Andy in 7a51cbd5 +- feat(merge,oauth): add path-aware AI merge resolution and device code streaming (#296) by @Andy in 26beefe3 +- feat: enhance the logs for the commit linting stage (#293) by @Alex in 8416f307 +- fix(github): add explicit GET method to gh api comment fetches (#294) by @Andy in 217249c8 +- fix(frontend): support archiving tasks across all worktree locations (#286) by @Andy in 8bb3df91 +- Potential fix for code scanning alert no. 224: Uncontrolled command line (#285) by @Andy in 5106c6e9 +- fix(frontend): validate backend source path before using it (#287) by @Andy in 3ff61274 +- feat(python): bundle Python 3.12 with packaged Electron app (#284) by @Andy in 7f19c2e1 +- fix: resolve spawn python ENOENT error on Linux by using getAugmentedEnv() (#281) by @Todd W. 
Bucy in d98e2830 +- fix(ci): add write permissions to beta-release update-version job by @AndyMik90 in 0b874d4b +- chore(deps): bump @xterm/xterm from 5.5.0 to 6.0.0 in /apps/frontend (#270) by @dependabot[bot] in 50dd1078 +- fix(github): resolve follow-up review API issues by @AndyMik90 in f1cc5a09 +- fix(security): resolve CodeQL file system race conditions and unused variables (#277) by @Andy in b005fa5c +- fix(ci): use correct electron-builder arch flags (#278) by @Andy in d79f2da4 +- chore(deps): bump jsdom from 26.1.0 to 27.3.0 in /apps/frontend (#268) by @dependabot[bot] in 5ac566e2 +- chore(deps): bump typescript-eslint in /apps/frontend (#269) by @dependabot[bot] in f49d4817 +- fix(ci): use develop branch for dry-run builds in beta-release workflow (#276) by @Andy in 1e1d7d9b +- fix: accept bug_fix workflow_type alias during planning (#240) by @Daniel Frey in e74a3dff +- fix(paths): normalize relative paths to posix (#239) by @Daniel Frey in 6ac8250b +- chore(deps): bump @electron/rebuild in /apps/frontend (#271) by @dependabot[bot] in a2cee694 +- chore(deps): bump vitest from 4.0.15 to 4.0.16 in /apps/frontend (#272) by @dependabot[bot] in d4cad80a +- feat(github): add automated PR review with follow-up support (#252) by @Andy in 596e9513 +- ci: implement enterprise-grade PR quality gates and security scanning (#266) by @Alex in d42041c5 +- fix: update path resolution for ollama_model_detector.py in memory handlers (#263) by @delyethan in a3f87540 +- feat: add i18n internationalization system (#248) by @Mitsu in f8438112 +- Revert "Feat/Auto Fix Github issues and do extensive AI PR reviews (#250)" (#251) by @Andy in 5e8c5308 +- Feat/Auto Fix Github issues and do extensive AI PR reviews (#250) by @Andy in 348de6df +- fix: resolve Python detection and backend packaging issues (#241) by @HSSAINI Saad in 0f7d6e05 +- fix: add future annotations import to discovery.py (#229) by @Joris Slagter in 5ccdb6ab +- Fix/ideation status sync (#212) by @souky-byte in 6ec8549f +- fix(core): add global spec numbering lock to prevent collisions (#209) by @Andy in 53527293 +- feat: Add OpenRouter as LLM/embedding provider (#162) by @Fernando Possebon in 02bef954 +- fix: Add Python 3.10+ version validation and GitHub Actions Python setup (#180 #167) (#208) by @Fernando Possebon in f168bdc3 +- fix(ci): correct welcome workflow PR message (#206) by @Andy in e3eec68a +- Feat/beta release (#193) by @Andy in 407a0bee +- feat/beta-release (#190) by @Andy in 8f766ad1 +- fix/PRs from old main setup to apps structure (#185) by @Andy in ced2ad47 +- fix: hide status badge when execution phase badge is showing (#154) by @Andy in 05f5d303 +- feat: Add UI scale feature with 75-200% range (#125) by @Enes Cingรถz in 6951251b +- fix(task): stop running process when task status changes away from in_progress by @AndyMik90 in 30e7536b +- Fix/linear 400 error by @Andy in 220faf0f +- fix: remove legacy path from auto-claude source detection (#148) by @Joris Slagter in f96c6301 +- fix: resolve Python environment race condition (#142) by @Joris Slagter in ebd8340d +- Feat: Ollama download progress tracking with new apps structure (#141) by @rayBlock in df779530 +- Feature/apps restructure v2.7.2 (#138) by @Andy in 0adaddac +- docs: Add Git Flow branching strategy to CONTRIBUTING.md by @AndyMik90 in 91f7051d + +## Thanks to all contributors + +@Andy, @Adryan Serage, @Michael Ludlow, @Navid, @Mulaveesala Pranaveswar, @Vinรญcius Santos, @Abe Diaz, @Mitsu, @Alex, @AndyMik90, @Joe, @Illia Filippov, @Ian, @Brian, @Kevin Rajan, 
@Oluwatosin Oyeladun, @JoshuaRileyDev, @HSSAINI Saad, @souky-byte, @Todd W. Bucy, @dependabot[bot], @Daniel Frey, @delyethan, @Joris Slagter, @Fernando Possebon, @Enes Cingรถz, @rayBlock + ## 2.7.1 - Build Pipeline Enhancements ### ๐Ÿ› ๏ธ Improvements diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bded7f5c25..295c5dd489 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -357,6 +357,64 @@ export default function(props) { - End files with a newline - Keep line length under 100 characters when practical +### File Encoding (Python) + +**Always specify `encoding="utf-8"` for text file operations** to ensure Windows compatibility. + +Windows Python defaults to `cp1252` encoding instead of UTF-8, causing errors with: +- Emoji (๐Ÿš€, โœ…, โŒ) +- International characters (รฑ, รฉ, ไธญๆ–‡, ุงู„ุนุฑุจูŠุฉ) +- Special symbols (โ„ข, ยฉ, ยฎ) + +**DO:** + +```python +# Reading files +with open(path, encoding="utf-8") as f: + content = f.read() + +# Writing files +with open(path, "w", encoding="utf-8") as f: + f.write(content) + +# Path methods +from pathlib import Path +content = Path(file).read_text(encoding="utf-8") +Path(file).write_text(content, encoding="utf-8") + +# JSON files - reading +import json +with open(path, encoding="utf-8") as f: + data = json.load(f) + +# JSON files - writing +with open(path, "w", encoding="utf-8") as f: + json.dump(data, f, ensure_ascii=False, indent=2) +``` + +**DON'T:** + +```python +# Wrong - platform-dependent encoding +with open(path) as f: + content = f.read() + +# Wrong - Path methods without encoding +content = Path(file).read_text() + +# Wrong - encoding on json.dump (not open!) +json.dump(data, f, encoding="utf-8") # ERROR +``` + +**Binary files - NO encoding:** + +```python +with open(path, "rb") as f: # Correct + data = f.read() +``` + +Our pre-commit hooks automatically check for missing encoding parameters. See [PR #782](https://github.com/AndyMik90/Auto-Claude/pull/782) for the comprehensive encoding fix and [guides/windows-development.md](guides/windows-development.md) for Windows-specific development guidance. 
+ ## Testing ### Python Tests diff --git a/README.md b/README.md index d22c5216a2..b5c6f60cef 100644 --- a/README.md +++ b/README.md @@ -4,11 +4,9 @@ ![Auto Claude Kanban Board](.github/assets/Auto-Claude-Kanban.png) - -[![Version](https://img.shields.io/badge/version-2.7.2-blue?style=flat-square)](https://github.com/AndyMik90/Auto-Claude/releases/tag/v2.7.2) - [![License](https://img.shields.io/badge/license-AGPL--3.0-green?style=flat-square)](./agpl-3.0.txt) [![Discord](https://img.shields.io/badge/Discord-Join%20Community-5865F2?style=flat-square&logo=discord&logoColor=white)](https://discord.gg/KCXaPBr4Dj) +[![YouTube](https://img.shields.io/badge/YouTube-Subscribe-FF0000?style=flat-square&logo=youtube&logoColor=white)](https://www.youtube.com/@AndreMikalsen) [![CI](https://img.shields.io/github/actions/workflow/status/AndyMik90/Auto-Claude/ci.yml?branch=main&style=flat-square&label=CI)](https://github.com/AndyMik90/Auto-Claude/actions) --- @@ -24,11 +22,11 @@ | Platform | Download | |----------|----------| -| **Windows** | [Auto-Claude-2.7.1-win32-x64.exe](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-win32-x64.exe) | -| **macOS (Apple Silicon)** | [Auto-Claude-2.7.1-darwin-arm64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-darwin-arm64.dmg) | -| **macOS (Intel)** | [Auto-Claude-2.7.1-darwin-x64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-darwin-x64.dmg) | -| **Linux** | [Auto-Claude-2.7.1-linux-x86_64.AppImage](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-linux-x86_64.AppImage) | -| **Linux (Debian)** | [Auto-Claude-2.7.1-linux-amd64.deb](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-linux-amd64.deb) | +| **Windows** | [Auto-Claude-2.7.2-win32-x64.exe](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-win32-x64.exe) | +| **macOS (Apple Silicon)** | [Auto-Claude-2.7.2-darwin-arm64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-darwin-arm64.dmg) | +| **macOS (Intel)** | [Auto-Claude-2.7.2-darwin-x64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-darwin-x64.dmg) | +| **Linux** | [Auto-Claude-2.7.2-linux-x86_64.AppImage](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-linux-x86_64.AppImage) | +| **Linux (Debian)** | [Auto-Claude-2.7.2-linux-amd64.deb](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-linux-amd64.deb) | ### Beta Release @@ -59,7 +57,6 @@ - **Claude Pro/Max subscription** - [Get one here](https://claude.ai/upgrade) - **Claude Code CLI** - `npm install -g @anthropic-ai/claude-code` - **Git repository** - Your project must be initialized as a git repo -- **Python 3.12+** - Required for the backend and Memory Layer --- @@ -148,113 +145,11 @@ See [guides/CLI-USAGE.md](guides/CLI-USAGE.md) for complete CLI documentation. --- -## Configuration +## Development -Create `apps/backend/.env` from the example: +Want to build from source or contribute? See [CONTRIBUTING.md](CONTRIBUTING.md) for complete development setup instructions. 
-```bash -cp apps/backend/.env.example apps/backend/.env -``` - -| Variable | Required | Description | -|----------|----------|-------------| -| `CLAUDE_CODE_OAUTH_TOKEN` | Yes | OAuth token from `claude setup-token` | -| `GRAPHITI_ENABLED` | No | Enable Memory Layer for cross-session context | -| `AUTO_BUILD_MODEL` | No | Override the default Claude model | -| `GITLAB_TOKEN` | No | GitLab Personal Access Token for GitLab integration | -| `GITLAB_INSTANCE_URL` | No | GitLab instance URL (defaults to gitlab.com) | -| `LINEAR_API_KEY` | No | Linear API key for task sync | - ---- - -## Building from Source - -For contributors and development: - -```bash -# Clone the repository -git clone https://github.com/AndyMik90/Auto-Claude.git -cd Auto-Claude - -# Install all dependencies -npm run install:all - -# Run in development mode -npm run dev - -# Or build and run -npm start -``` - -**System requirements for building:** -- Node.js 24+ -- Python 3.12+ -- npm 10+ - -**Installing dependencies by platform:** - -
-<details>
-<summary>Windows</summary>
-
-```bash
-winget install Python.Python.3.12
-winget install OpenJS.NodeJS.LTS
-```
-</details>
-
-<details>
-<summary>macOS</summary>
-
-```bash
-brew install python@3.12 node@24
-```
-</details>
-
-<details>
-<summary>Linux (Ubuntu/Debian)</summary>
-
-```bash
-sudo apt install python3.12 python3.12-venv
-curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash -
-sudo apt install -y nodejs
-```
-</details>
-
-<details>
-<summary>Linux (Fedora)</summary>
-
-```bash
-sudo dnf install python3.12 nodejs npm
-```
-
- -See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed development setup. - -### Building Flatpak - -To build the Flatpak package, you need additional dependencies: - -```bash -# Fedora/RHEL -sudo dnf install flatpak-builder - -# Ubuntu/Debian -sudo apt install flatpak-builder - -# Install required Flatpak runtimes -flatpak install flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 -flatpak install flathub org.electronjs.Electron2.BaseApp//25.08 - -# Build the Flatpak -cd apps/frontend -npm run package:flatpak -``` - -The Flatpak will be created in `apps/frontend/dist/`. +For Linux-specific builds (Flatpak, AppImage), see [guides/linux.md](guides/linux.md). --- @@ -284,7 +179,7 @@ All releases are: | `npm run package:mac` | Package for macOS | | `npm run package:win` | Package for Windows | | `npm run package:linux` | Package for Linux | -| `npm run package:flatpak` | Package as Flatpak | +| `npm run package:flatpak` | Package as Flatpak (see [guides/linux.md](guides/linux.md)) | | `npm run lint` | Run linter | | `npm test` | Run frontend tests | | `npm run test:backend` | Run backend tests | @@ -316,3 +211,11 @@ We welcome contributions! Please read [CONTRIBUTING.md](CONTRIBUTING.md) for: Auto Claude is free to use. If you modify and distribute it, or run it as a service, your code must also be open source under AGPL-3.0. Commercial licensing available for closed-source use cases. + +--- + +## Star History + +[![GitHub Repo stars](https://img.shields.io/github/stars/AndyMik90/Auto-Claude?style=social)](https://github.com/AndyMik90/Auto-Claude/stargazers) + +[![Star History Chart](https://api.star-history.com/svg?repos=AndyMik90/Auto-Claude&type=Date)](https://star-history.com/#AndyMik90/Auto-Claude&Date) diff --git a/RELEASE.md b/RELEASE.md index d7f6eb10dd..21d0e6b53d 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -69,9 +69,38 @@ This will: - Update `apps/frontend/package.json` - Update `package.json` (root) - Update `apps/backend/__init__.py` +- Check if `CHANGELOG.md` has an entry for the new version (warns if missing) - Create a commit with message `chore: bump version to X.Y.Z` -### Step 2: Push and Create PR +### Step 2: Update CHANGELOG.md (REQUIRED) + +**IMPORTANT: The release will fail if CHANGELOG.md doesn't have an entry for the new version.** + +Add release notes to `CHANGELOG.md` at the top of the file: + +```markdown +## 2.8.0 - Your Release Title + +### โœจ New Features +- Feature description + +### ๐Ÿ› ๏ธ Improvements +- Improvement description + +### ๐Ÿ› Bug Fixes +- Fix description + +--- +``` + +Then amend the version bump commit: + +```bash +git add CHANGELOG.md +git commit --amend --no-edit +``` + +### Step 3: Push and Create PR ```bash # Push your branch @@ -81,24 +110,25 @@ git push origin your-branch gh pr create --base main --title "Release v2.8.0" ``` -### Step 3: Merge to Main +### Step 4: Merge to Main Once the PR is approved and merged to `main`, GitHub Actions will automatically: 1. **Detect the version bump** (`prepare-release.yml`) -2. **Create a git tag** (e.g., `v2.8.0`) -3. **Trigger the release workflow** (`release.yml`) -4. **Build binaries** for all platforms: +2. **Validate CHANGELOG.md** has an entry for the new version (FAILS if missing) +3. **Extract release notes** from CHANGELOG.md +4. **Create a git tag** (e.g., `v2.8.0`) +5. **Trigger the release workflow** (`release.yml`) +6. 
**Build binaries** for all platforms: - macOS Intel (x64) - code signed & notarized - macOS Apple Silicon (arm64) - code signed & notarized - Windows (NSIS installer) - code signed - Linux (AppImage + .deb) -5. **Generate changelog** from merged PRs (using release-drafter) -6. **Scan binaries** with VirusTotal -7. **Create GitHub release** with all artifacts -8. **Update README** with new version badge and download links +7. **Scan binaries** with VirusTotal +8. **Create GitHub release** with release notes from CHANGELOG.md +9. **Update README** with new version badge and download links -### Step 4: Verify +### Step 5: Verify After merging, check: - [GitHub Actions](https://github.com/AndyMik90/Auto-Claude/actions) - ensure all workflows pass @@ -113,28 +143,49 @@ We follow [Semantic Versioning](https://semver.org/): - **MINOR** (0.X.0): New features, backwards compatible - **PATCH** (0.0.X): Bug fixes, backwards compatible -## Changelog Generation +## Changelog Management + +Release notes are managed in `CHANGELOG.md` and used for GitHub releases. + +### Changelog Format -Changelogs are automatically generated from merged PRs using [Release Drafter](https://github.com/release-drafter/release-drafter). +Each version entry in `CHANGELOG.md` should follow this format: -### PR Labels for Changelog Categories +```markdown +## X.Y.Z - Release Title -| Label | Category | -|-------|----------| -| `feature`, `enhancement` | New Features | -| `bug`, `fix` | Bug Fixes | -| `improvement`, `refactor` | Improvements | -| `documentation` | Documentation | -| (any other) | Other Changes | +### โœจ New Features +- Feature description with context -**Tip:** Add appropriate labels to your PRs for better changelog organization. +### ๐Ÿ› ๏ธ Improvements +- Improvement description + +### ๐Ÿ› Bug Fixes +- Fix description + +--- +``` + +### Changelog Validation + +The release workflow **validates** that `CHANGELOG.md` has an entry for the version being released: + +- If the entry is **missing**, the release is **blocked** with a clear error message +- If the entry **exists**, its content is used for the GitHub release notes + +### Writing Good Release Notes + +- **Be specific**: Instead of "Fixed bug", write "Fixed crash when opening large files" +- **Group by impact**: Features first, then improvements, then fixes +- **Credit contributors**: Mention contributors for significant changes +- **Link issues**: Reference GitHub issues where relevant (e.g., "Fixes #123") ## Workflows | Workflow | Trigger | Purpose | |----------|---------|---------| -| `prepare-release.yml` | Push to `main` | Detects version bump, creates tag | -| `release.yml` | Tag `v*` pushed | Builds binaries, creates release | +| `prepare-release.yml` | Push to `main` | Detects version bump, **validates CHANGELOG.md**, creates tag | +| `release.yml` | Tag `v*` pushed | Builds binaries, extracts changelog, creates release | | `validate-version.yml` | Tag `v*` pushed | Validates tag matches package.json | | `update-readme` (in release.yml) | After release | Updates README with new version | @@ -153,6 +204,22 @@ Changelogs are automatically generated from merged PRs using [Release Drafter](h git diff HEAD~1 --name-only | grep package.json ``` +### Release blocked: Missing changelog entry + +If you see "CHANGELOG VALIDATION FAILED" in the workflow: + +1. The `prepare-release.yml` workflow validated that `CHANGELOG.md` doesn't have an entry for the new version +2. **Fix**: Add an entry to `CHANGELOG.md` with the format `## X.Y.Z - Title` +3. 
Commit and push the changelog update +4. The workflow will automatically retry when the changes are pushed to `main` + +```bash +# Add changelog entry, then: +git add CHANGELOG.md +git commit -m "docs: add changelog for vX.Y.Z" +git push origin main +``` + ### Build failed after tag was created - The release won't be published if builds fail diff --git a/apps/backend/agents/README.md b/apps/backend/agents/README.md index 1cf2b2fb81..85253eae26 100644 --- a/apps/backend/agents/README.md +++ b/apps/backend/agents/README.md @@ -26,7 +26,7 @@ auto-claude/agents/ ### `utils.py` (3.6 KB) - Git operations: `get_latest_commit()`, `get_commit_count()` - Plan management: `load_implementation_plan()`, `find_subtask_in_plan()`, `find_phase_for_subtask()` -- Workspace sync: `sync_plan_to_source()` +- Workspace sync: `sync_spec_to_source()` ### `memory.py` (13 KB) - Dual-layer memory system (Graphiti primary, file-based fallback) @@ -73,7 +73,7 @@ from agents import ( # Utilities get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) ``` diff --git a/apps/backend/agents/__init__.py b/apps/backend/agents/__init__.py index 37dae174c4..4eed468607 100644 --- a/apps/backend/agents/__init__.py +++ b/apps/backend/agents/__init__.py @@ -14,6 +14,10 @@ Uses lazy imports to avoid circular dependencies. """ +# Explicit import required by CodeQL static analysis +# (CodeQL doesn't recognize __getattr__ dynamic exports) +from .utils import sync_spec_to_source + __all__ = [ # Main API "run_autonomous_agent", @@ -32,7 +36,7 @@ "load_implementation_plan", "find_subtask_in_plan", "find_phase_for_subtask", - "sync_plan_to_source", + "sync_spec_to_source", # Constants "AUTO_CONTINUE_DELAY_SECONDS", "HUMAN_INTERVENTION_FILE", @@ -77,7 +81,7 @@ def __getattr__(name): "get_commit_count", "get_latest_commit", "load_implementation_plan", - "sync_plan_to_source", + "sync_spec_to_source", ): from .utils import ( find_phase_for_subtask, @@ -85,7 +89,7 @@ def __getattr__(name): get_commit_count, get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) return locals()[name] diff --git a/apps/backend/agents/coder.py b/apps/backend/agents/coder.py index 39d43b30a0..863aef1c7d 100644 --- a/apps/backend/agents/coder.py +++ b/apps/backend/agents/coder.py @@ -7,6 +7,7 @@ import asyncio import logging +import os from pathlib import Path from core.client import create_client @@ -37,6 +38,7 @@ ) from prompts import is_first_run from recovery import RecoveryManager +from security.constants import PROJECT_DIR_ENV_VAR from task_logger import ( LogPhase, get_task_logger, @@ -62,7 +64,7 @@ get_commit_count, get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) logger = logging.getLogger(__name__) @@ -90,6 +92,10 @@ async def run_autonomous_agent( verbose: Whether to show detailed output source_spec_dir: Original spec directory in main project (for syncing from worktree) """ + # Set environment variable for security hooks to find the correct project directory + # This is needed because os.getcwd() may return the wrong directory in worktree mode + os.environ[PROJECT_DIR_ENV_VAR] = str(project_dir.resolve()) + # Initialize recovery manager (handles memory persistence) recovery_manager = RecoveryManager(spec_dir, project_dir) @@ -404,7 +410,7 @@ async def run_autonomous_agent( print_status("Linear notified of stuck subtask", "info") elif is_planning_phase and source_spec_dir: # After planning phase, sync the newly created implementation 
plan back to source - if sync_plan_to_source(spec_dir, source_spec_dir): + if sync_spec_to_source(spec_dir, source_spec_dir): print_status("Implementation plan synced to main project", "success") # Handle session status diff --git a/apps/backend/agents/session.py b/apps/backend/agents/session.py index 89a5d5d48c..263bf17efb 100644 --- a/apps/backend/agents/session.py +++ b/apps/backend/agents/session.py @@ -40,7 +40,7 @@ get_commit_count, get_latest_commit, load_implementation_plan, - sync_plan_to_source, + sync_spec_to_source, ) logger = logging.getLogger(__name__) @@ -82,7 +82,7 @@ async def post_session_processing( print(muted("--- Post-Session Processing ---")) # Sync implementation plan back to source (for worktree mode) - if sync_plan_to_source(spec_dir, source_spec_dir): + if sync_spec_to_source(spec_dir, source_spec_dir): print_status("Implementation plan synced to main project", "success") # Check if implementation plan was updated @@ -445,8 +445,9 @@ async def run_agent_session( result_content = getattr(block, "content", "") is_error = getattr(block, "is_error", False) - # Check if command was blocked by security hook - if "blocked" in str(result_content).lower(): + # Check if this is an error (not just content containing "blocked") + if is_error and "blocked" in str(result_content).lower(): + # Actual blocked command by security hook debug_error( "session", f"Tool BLOCKED: {current_tool}", diff --git a/apps/backend/agents/tools_pkg/tools/memory.py b/apps/backend/agents/tools_pkg/tools/memory.py index ac361ab78c..b5367663e9 100644 --- a/apps/backend/agents/tools_pkg/tools/memory.py +++ b/apps/backend/agents/tools_pkg/tools/memory.py @@ -4,9 +4,16 @@ Tools for recording and retrieving session memory, including discoveries, gotchas, and patterns. + +Dual-storage approach: +- File-based: Always available, works offline, spec-specific +- LadybugDB: When Graphiti is enabled, also saves to graph database for + cross-session retrieval and Memory UI display """ +import asyncio import json +import logging from datetime import datetime, timezone from pathlib import Path from typing import Any @@ -19,6 +26,108 @@ SDK_TOOLS_AVAILABLE = False tool = None +logger = logging.getLogger(__name__) + + +async def _save_to_graphiti_async( + spec_dir: Path, + project_dir: Path, + save_type: str, + data: dict, +) -> bool: + """ + Save data to Graphiti/LadybugDB (async implementation). 
+ + Args: + spec_dir: Spec directory for GraphitiMemory initialization + project_dir: Project root directory + save_type: Type of save - 'discovery', 'gotcha', or 'pattern' + data: Data to save + + Returns: + True if save succeeded, False otherwise + """ + try: + # Check if Graphiti is enabled + from graphiti_config import is_graphiti_enabled + + if not is_graphiti_enabled(): + return False + + from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory + + memory = GraphitiMemory(spec_dir, project_dir) + try: + if save_type == "discovery": + # Save as codebase discovery + # Format: {file_path: description} + result = await memory.save_codebase_discoveries( + {data["file_path"]: data["description"]} + ) + elif save_type == "gotcha": + # Save as gotcha + gotcha_text = data["gotcha"] + if data.get("context"): + gotcha_text += f" (Context: {data['context']})" + result = await memory.save_gotcha(gotcha_text) + elif save_type == "pattern": + # Save as pattern + result = await memory.save_pattern(data["pattern"]) + else: + result = False + return result + finally: + await memory.close() + + except ImportError as e: + logger.debug(f"Graphiti not available for memory tools: {e}") + return False + except Exception as e: + logger.warning(f"Failed to save to Graphiti: {e}") + return False + + +def _save_to_graphiti_sync( + spec_dir: Path, + project_dir: Path, + save_type: str, + data: dict, +) -> bool: + """ + Save data to Graphiti/LadybugDB (synchronous wrapper for sync contexts only). + + NOTE: This should only be called from synchronous code. For async callers, + use _save_to_graphiti_async() directly to ensure proper resource cleanup. + + Args: + spec_dir: Spec directory for GraphitiMemory initialization + project_dir: Project root directory + save_type: Type of save - 'discovery', 'gotcha', or 'pattern' + data: Data to save + + Returns: + True if save succeeded, False otherwise + """ + try: + # Check if we're already in an async context + try: + asyncio.get_running_loop() + # We're in an async context - caller should use _save_to_graphiti_async + # Log a warning and return False to avoid the resource leak bug + logger.warning( + "_save_to_graphiti_sync called from async context. " + "Use _save_to_graphiti_async instead for proper cleanup." 
+ ) + return False + except RuntimeError: + # No running loop - safe to create one + return asyncio.run( + _save_to_graphiti_async(spec_dir, project_dir, save_type, data) + ) + except Exception as e: + logger.warning(f"Failed to save to Graphiti: {e}") + return False + def create_memory_tools(spec_dir: Path, project_dir: Path) -> list: """ @@ -45,7 +154,7 @@ def create_memory_tools(spec_dir: Path, project_dir: Path) -> list: {"file_path": str, "description": str, "category": str}, ) async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: - """Record a discovery to the codebase map.""" + """Record a discovery to the codebase map (file + Graphiti).""" file_path = args["file_path"] description = args["description"] category = args.get("category", "general") @@ -54,8 +163,10 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: memory_dir.mkdir(exist_ok=True) codebase_map_file = memory_dir / "codebase_map.json" + saved_to_graphiti = False try: + # PRIMARY: Save to file-based storage (always works) # Load existing map or create new if codebase_map_file.exists(): with open(codebase_map_file) as f: @@ -77,11 +188,23 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: with open(codebase_map_file, "w") as f: json.dump(codebase_map, f, indent=2) + # SECONDARY: Also save to Graphiti/LadybugDB (for Memory UI) + saved_to_graphiti = await _save_to_graphiti_async( + spec_dir, + project_dir, + "discovery", + { + "file_path": file_path, + "description": f"[{category}] {description}", + }, + ) + + storage_note = " (also saved to memory graph)" if saved_to_graphiti else "" return { "content": [ { "type": "text", - "text": f"Recorded discovery for '{file_path}': {description}", + "text": f"Recorded discovery for '{file_path}': {description}{storage_note}", } ] } @@ -102,7 +225,7 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]: {"gotcha": str, "context": str}, ) async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]: - """Record a gotcha to session memory.""" + """Record a gotcha to session memory (file + Graphiti).""" gotcha = args["gotcha"] context = args.get("context", "") @@ -110,8 +233,10 @@ async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]: memory_dir.mkdir(exist_ok=True) gotchas_file = memory_dir / "gotchas.md" + saved_to_graphiti = False try: + # PRIMARY: Save to file-based storage (always works) timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M") entry = f"\n## [{timestamp}]\n{gotcha}" @@ -126,7 +251,20 @@ async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]: ) f.write(entry) - return {"content": [{"type": "text", "text": f"Recorded gotcha: {gotcha}"}]} + # SECONDARY: Also save to Graphiti/LadybugDB (for Memory UI) + saved_to_graphiti = await _save_to_graphiti_async( + spec_dir, + project_dir, + "gotcha", + {"gotcha": gotcha, "context": context}, + ) + + storage_note = " (also saved to memory graph)" if saved_to_graphiti else "" + return { + "content": [ + {"type": "text", "text": f"Recorded gotcha: {gotcha}{storage_note}"} + ] + } except Exception as e: return { diff --git a/apps/backend/agents/utils.py b/apps/backend/agents/utils.py index 8ce33c9224..614cdb795a 100644 --- a/apps/backend/agents/utils.py +++ b/apps/backend/agents/utils.py @@ -8,40 +8,38 @@ import json import logging import shutil -import subprocess from pathlib import Path +from core.git_executable import run_git + logger = logging.getLogger(__name__) def get_latest_commit(project_dir: Path) -> str | None: """Get the 
hash of the latest git commit.""" - try: - result = subprocess.run( - ["git", "rev-parse", "HEAD"], - cwd=project_dir, - capture_output=True, - text=True, - check=True, - ) + result = run_git( + ["rev-parse", "HEAD"], + cwd=project_dir, + timeout=10, + ) + if result.returncode == 0: return result.stdout.strip() - except subprocess.CalledProcessError: - return None + return None def get_commit_count(project_dir: Path) -> int: """Get the total number of commits.""" - try: - result = subprocess.run( - ["git", "rev-list", "--count", "HEAD"], - cwd=project_dir, - capture_output=True, - text=True, - check=True, - ) - return int(result.stdout.strip()) - except (subprocess.CalledProcessError, ValueError): - return 0 + result = run_git( + ["rev-list", "--count", "HEAD"], + cwd=project_dir, + timeout=10, + ) + if result.returncode == 0: + try: + return int(result.stdout.strip()) + except ValueError: + return 0 + return 0 def load_implementation_plan(spec_dir: Path) -> dict | None: @@ -74,16 +72,32 @@ def find_phase_for_subtask(plan: dict, subtask_id: str) -> dict | None: return None -def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: +def sync_spec_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: """ - Sync implementation_plan.json from worktree back to source spec directory. - - When running in isolated mode (worktrees), the agent updates the implementation - plan inside the worktree. This function syncs those changes back to the main - project's spec directory so the frontend/UI can see the progress. + Sync ALL spec files from worktree back to source spec directory. + + When running in isolated mode (worktrees), the agent creates and updates + many files inside the worktree's spec directory. This function syncs ALL + of them back to the main project's spec directory. + + IMPORTANT: Since .auto-claude/ is gitignored, this sync happens to the + local filesystem regardless of what branch the user is on. The worktree + may be on a different branch (e.g., auto-claude/093-task), but the sync + target is always the main project's .auto-claude/specs/ directory. + + Files synced (all files in spec directory): + - implementation_plan.json - Task status and subtask completion + - build-progress.txt - Session-by-session progress notes + - task_logs.json - Execution logs + - review_state.json - QA review state + - critique_report.json - Spec critique findings + - suggested_commit_message.txt - Commit suggestions + - REGRESSION_TEST_REPORT.md - Test regression report + - spec.md, context.json, etc. 
- Original spec files (for completeness) + - memory/ directory - Codebase map, patterns, gotchas, session insights Args: - spec_dir: Current spec directory (may be inside worktree) + spec_dir: Current spec directory (inside worktree) source_spec_dir: Original spec directory in main project (outside worktree) Returns: @@ -100,17 +114,68 @@ def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: if spec_dir_resolved == source_spec_dir_resolved: return False # Same directory, no sync needed - # Sync the implementation plan - plan_file = spec_dir / "implementation_plan.json" - if not plan_file.exists(): - return False + synced_any = False - source_plan_file = source_spec_dir / "implementation_plan.json" + # Ensure source directory exists + source_spec_dir.mkdir(parents=True, exist_ok=True) try: - shutil.copy2(plan_file, source_plan_file) - logger.debug(f"Synced implementation plan to source: {source_plan_file}") - return True + # Sync all files and directories from worktree spec to source spec + for item in spec_dir.iterdir(): + # Skip symlinks to prevent path traversal attacks + if item.is_symlink(): + logger.warning(f"Skipping symlink during sync: {item.name}") + continue + + source_item = source_spec_dir / item.name + + if item.is_file(): + # Copy file (preserves timestamps) + shutil.copy2(item, source_item) + logger.debug(f"Synced {item.name} to source") + synced_any = True + + elif item.is_dir(): + # Recursively sync directory + _sync_directory(item, source_item) + synced_any = True + except Exception as e: - logger.warning(f"Failed to sync implementation plan to source: {e}") - return False + logger.warning(f"Failed to sync spec directory to source: {e}") + + return synced_any + + +def _sync_directory(source_dir: Path, target_dir: Path) -> None: + """ + Recursively sync a directory from source to target. 
+ + Args: + source_dir: Source directory (in worktree) + target_dir: Target directory (in main project) + """ + # Create target directory if needed + target_dir.mkdir(parents=True, exist_ok=True) + + for item in source_dir.iterdir(): + # Skip symlinks to prevent path traversal attacks + if item.is_symlink(): + logger.warning( + f"Skipping symlink during sync: {source_dir.name}/{item.name}" + ) + continue + + target_item = target_dir / item.name + + if item.is_file(): + shutil.copy2(item, target_item) + logger.debug(f"Synced {source_dir.name}/{item.name} to source") + elif item.is_dir(): + # Recurse into subdirectories + _sync_directory(item, target_item) + + +# Keep the old name as an alias for backward compatibility +def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool: + """Alias for sync_spec_to_source for backward compatibility.""" + return sync_spec_to_source(spec_dir, source_spec_dir) diff --git a/apps/backend/analysis/insight_extractor.py b/apps/backend/analysis/insight_extractor.py index 75974d6b59..7b461afbae 100644 --- a/apps/backend/analysis/insight_extractor.py +++ b/apps/backend/analysis/insight_extractor.py @@ -387,12 +387,40 @@ async def run_insight_extraction( # Collect the response response_text = "" + message_count = 0 + text_blocks_found = 0 + async for msg in client.receive_response(): msg_type = type(msg).__name__ + message_count += 1 + if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): - response_text += block.text + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): + text_blocks_found += 1 + if block.text: # Only add non-empty text + response_text += block.text + else: + logger.debug( + f"Found empty TextBlock in response (block #{text_blocks_found})" + ) + + # Log response collection summary + logger.debug( + f"Insight extraction response: {message_count} messages, " + f"{text_blocks_found} text blocks, {len(response_text)} chars collected" + ) + + # Validate we received content before parsing + if not response_text.strip(): + logger.warning( + f"Insight extraction returned empty response. " + f"Messages received: {message_count}, TextBlocks found: {text_blocks_found}. " + f"This may indicate the AI model did not respond with text content." 
+ ) + return None # Parse JSON from response return parse_insights(response_text) @@ -415,6 +443,11 @@ def parse_insights(response_text: str) -> dict | None: # Try to extract JSON from the response text = response_text.strip() + # Early validation - check for empty response + if not text: + logger.warning("Cannot parse insights: response text is empty") + return None + # Handle markdown code blocks if text.startswith("```"): # Remove code block markers @@ -422,17 +455,26 @@ def parse_insights(response_text: str) -> dict | None: # Remove first line (```json or ```) if lines[0].startswith("```"): lines = lines[1:] - # Remove last line if it's `` + # Remove last line if it's ``` if lines and lines[-1].strip() == "```": lines = lines[:-1] - text = "\n".join(lines) + text = "\n".join(lines).strip() + + # Check again after removing code blocks + if not text: + logger.warning( + "Cannot parse insights: response contained only markdown code block markers with no content" + ) + return None try: insights = json.loads(text) # Validate structure if not isinstance(insights, dict): - logger.warning("Insights is not a dict") + logger.warning( + f"Insights is not a dict, got type: {type(insights).__name__}" + ) return None # Ensure required keys exist with defaults @@ -446,7 +488,13 @@ def parse_insights(response_text: str) -> dict | None: except json.JSONDecodeError as e: logger.warning(f"Failed to parse insights JSON: {e}") - logger.debug(f"Response text was: {text[:500]}") + # Show more context in the error message + preview_length = min(500, len(text)) + logger.warning( + f"Response text preview (first {preview_length} chars): {text[:preview_length]}" + ) + if len(text) > preview_length: + logger.warning(f"... (total length: {len(text)} chars)") return None diff --git a/apps/backend/cli/batch_commands.py b/apps/backend/cli/batch_commands.py index 28a82ea90a..959df5eeac 100644 --- a/apps/backend/cli/batch_commands.py +++ b/apps/backend/cli/batch_commands.py @@ -6,6 +6,8 @@ """ import json +import shutil +import subprocess from pathlib import Path from ui import highlight, print_status @@ -184,7 +186,7 @@ def handle_batch_cleanup_command(project_dir: str, dry_run: bool = True) -> bool True if successful """ specs_dir = Path(project_dir) / ".auto-claude" / "specs" - worktrees_dir = Path(project_dir) / ".worktrees" + worktrees_dir = Path(project_dir) / ".auto-claude" / "worktrees" / "tasks" if not specs_dir.exists(): print_status("No specs directory found", "info") @@ -209,8 +211,56 @@ def handle_batch_cleanup_command(project_dir: str, dry_run: bool = True) -> bool print(f" - {spec_name}") wt_path = worktrees_dir / spec_name if wt_path.exists(): - print(f" โ””โ”€ .worktrees/{spec_name}/") + print(f" โ””โ”€ .auto-claude/worktrees/tasks/{spec_name}/") print() print("Run with --no-dry-run to actually delete") + else: + # Actually delete specs and worktrees + deleted_count = 0 + for spec_name in completed: + spec_path = specs_dir / spec_name + wt_path = worktrees_dir / spec_name + + # Remove worktree first (if exists) + if wt_path.exists(): + try: + result = subprocess.run( + ["git", "worktree", "remove", "--force", str(wt_path)], + cwd=project_dir, + capture_output=True, + text=True, + timeout=30, + ) + if result.returncode == 0: + print_status(f"Removed worktree: {spec_name}", "success") + else: + # Fallback: remove directory manually if git fails + shutil.rmtree(wt_path, ignore_errors=True) + print_status( + f"Removed worktree directory: {spec_name}", "success" + ) + except subprocess.TimeoutExpired: + # 
Timeout: fall back to manual removal + shutil.rmtree(wt_path, ignore_errors=True) + print_status( + f"Worktree removal timed out, removed directory: {spec_name}", + "warning", + ) + except Exception as e: + print_status( + f"Failed to remove worktree {spec_name}: {e}", "warning" + ) + + # Remove spec directory + if spec_path.exists(): + try: + shutil.rmtree(spec_path) + print_status(f"Removed spec: {spec_name}", "success") + deleted_count += 1 + except Exception as e: + print_status(f"Failed to remove spec {spec_name}: {e}", "error") + + print() + print_status(f"Cleaned up {deleted_count} spec(s)", "info") return True diff --git a/apps/backend/cli/build_commands.py b/apps/backend/cli/build_commands.py index 19dc17ca6b..ad5766ac54 100644 --- a/apps/backend/cli/build_commands.py +++ b/apps/backend/cli/build_commands.py @@ -79,7 +79,7 @@ def handle_build_command( base_branch: Base branch for worktree creation (default: current branch) """ # Lazy imports to avoid loading heavy modules - from agent import run_autonomous_agent, sync_plan_to_source + from agent import run_autonomous_agent, sync_spec_to_source from debug import ( debug, debug_info, @@ -274,7 +274,7 @@ def handle_build_command( # Sync implementation plan to main project after QA # This ensures the main project has the latest status (human_review) - if sync_plan_to_source(spec_dir, source_spec_dir): + if sync_spec_to_source(spec_dir, source_spec_dir): debug_info( "run.py", "Implementation plan synced to main project after QA" ) diff --git a/apps/backend/cli/utils.py b/apps/backend/cli/utils.py index f18954654a..0e2a7b427a 100644 --- a/apps/backend/cli/utils.py +++ b/apps/backend/cli/utils.py @@ -15,7 +15,47 @@ sys.path.insert(0, str(_PARENT_DIR)) from core.auth import get_auth_token, get_auth_token_source -from dotenv import load_dotenv +from core.dependency_validator import validate_platform_dependencies + + +def import_dotenv(): + """ + Import and return load_dotenv with helpful error message if not installed. + + This centralized function ensures consistent error messaging across all + runner scripts when python-dotenv is not available. + + Returns: + The load_dotenv function + + Raises: + SystemExit: If dotenv cannot be imported, with helpful installation instructions. + """ + try: + from dotenv import load_dotenv as _load_dotenv + + return _load_dotenv + except ImportError: + sys.exit( + "Error: Required Python package 'python-dotenv' is not installed.\n" + "\n" + "This usually means you're not using the virtual environment.\n" + "\n" + "To fix this:\n" + "1. From the 'apps/backend/' directory, activate the venv:\n" + " source .venv/bin/activate # Linux/macOS\n" + " .venv\\Scripts\\activate # Windows\n" + "\n" + "2. 
Or install dependencies directly:\n" + " pip install python-dotenv\n" + " pip install -r requirements.txt\n" + "\n" + f"Current Python: {sys.executable}\n" + ) + + +# Load .env with helpful error if dependencies not installed +load_dotenv = import_dotenv() from graphiti_config import get_graphiti_status from linear_integration import LinearManager from linear_updater import is_linear_enabled @@ -28,8 +68,8 @@ muted, ) -# Configuration -DEFAULT_MODEL = "claude-opus-4-5-20251101" +# Configuration - uses shorthand that resolves via API Profile if configured +DEFAULT_MODEL = "sonnet" # Changed from "opus" (fix #433) def setup_environment() -> Path: @@ -82,7 +122,7 @@ def find_spec(project_dir: Path, spec_identifier: str) -> Path | None: return spec_folder # Check worktree specs (for merge-preview, merge, review, discard operations) - worktree_base = project_dir / ".worktrees" + worktree_base = project_dir / ".auto-claude" / "worktrees" / "tasks" if worktree_base.exists(): # Try exact match in worktree worktree_spec = ( @@ -115,6 +155,9 @@ def validate_environment(spec_dir: Path) -> bool: Returns: True if valid, False otherwise (with error messages printed) """ + # Validate platform-specific dependencies first (exits if missing) + validate_platform_dependencies() + valid = True # Check for OAuth token (API keys are not supported) diff --git a/apps/backend/cli/workspace_commands.py b/apps/backend/cli/workspace_commands.py index 5e3d68a5aa..e6f2509182 100644 --- a/apps/backend/cli/workspace_commands.py +++ b/apps/backend/cli/workspace_commands.py @@ -22,6 +22,7 @@ get_merge_base, is_lock_file, ) +from core.worktree import WorktreeManager from debug import debug_warning from ui import ( Icons, @@ -67,6 +68,7 @@ def _detect_default_branch(project_dir: Path) -> str: cwd=project_dir, capture_output=True, text=True, + timeout=5, ) if result.returncode == 0: return env_branch @@ -78,6 +80,7 @@ def _detect_default_branch(project_dir: Path) -> str: cwd=project_dir, capture_output=True, text=True, + timeout=5, ) if result.returncode == 0: return branch @@ -90,18 +93,32 @@ def _get_changed_files_from_git( worktree_path: Path, base_branch: str = "main" ) -> list[str]: """ - Get list of changed files from git diff between base branch and HEAD. + Get list of files changed by the task (not files changed on base branch). + + Uses merge-base to accurately identify only the files modified in the worktree, + not files that changed on the base branch since the worktree was created. 
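A standalone sketch of the merge-base comparison this docstring describes, using plain `subprocess` calls (the helper name is illustrative, not the project's API):

```python
import subprocess
from pathlib import Path


def task_changed_files(worktree: Path, base_branch: str = "main") -> list[str]:
    """List files changed on the worktree branch only, ignoring later base-branch changes."""
    # Find the commit the worktree branched from
    merge_base = subprocess.run(
        ["git", "merge-base", base_branch, "HEAD"],
        cwd=worktree, capture_output=True, text=True, check=True,
    ).stdout.strip()

    # Two-dot diff from the merge-base reports only the task's own changes
    diff = subprocess.run(
        ["git", "diff", "--name-only", f"{merge_base}..HEAD"],
        cwd=worktree, capture_output=True, text=True, check=True,
    )
    return [line for line in diff.stdout.splitlines() if line.strip()]
```

Note that `git diff base...HEAD` computes the same merge-base diff implicitly; making the merge-base explicit keeps it available for logging and for the fallback path.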
Args: worktree_path: Path to the worktree base_branch: Base branch to compare against (default: main) Returns: - List of changed file paths + List of changed file paths (task changes only) """ try: + # First, get the merge-base (the point where the worktree branched) + merge_base_result = subprocess.run( + ["git", "merge-base", base_branch, "HEAD"], + cwd=worktree_path, + capture_output=True, + text=True, + check=True, + ) + merge_base = merge_base_result.stdout.strip() + + # Use two-dot diff from merge-base to get only task's changes result = subprocess.run( - ["git", "diff", "--name-only", f"{base_branch}...HEAD"], + ["git", "diff", "--name-only", f"{merge_base}..HEAD"], cwd=worktree_path, capture_output=True, text=True, @@ -113,10 +130,10 @@ def _get_changed_files_from_git( # Log the failure before trying fallback debug_warning( "workspace_commands", - f"git diff (three-dot) failed: returncode={e.returncode}, " + f"git diff with merge-base failed: returncode={e.returncode}, " f"stderr={e.stderr.strip() if e.stderr else 'N/A'}", ) - # Fallback: try without the three-dot notation + # Fallback: try direct two-arg diff (less accurate but works) try: result = subprocess.run( ["git", "diff", "--name-only", base_branch, "HEAD"], @@ -131,12 +148,178 @@ def _get_changed_files_from_git( # Log the failure before returning empty list debug_warning( "workspace_commands", - f"git diff (two-arg) failed: returncode={e.returncode}, " + f"git diff (fallback) failed: returncode={e.returncode}, " f"stderr={e.stderr.strip() if e.stderr else 'N/A'}", ) return [] +def _detect_worktree_base_branch( + project_dir: Path, + worktree_path: Path, + spec_name: str, +) -> str | None: + """ + Detect which branch a worktree was created from. + + Tries multiple strategies: + 1. Check worktree config file (.auto-claude/worktree-config.json) + 2. Find merge-base with known branches (develop, main, master) + 3. 
Return None if unable to detect + + Args: + project_dir: Project root directory + worktree_path: Path to the worktree + spec_name: Name of the spec + + Returns: + The detected base branch name, or None if unable to detect + """ + import json + + # Strategy 1: Check for worktree config file + config_path = worktree_path / ".auto-claude" / "worktree-config.json" + if config_path.exists(): + try: + config = json.loads(config_path.read_text()) + if config.get("base_branch"): + debug( + MODULE, + f"Found base branch in worktree config: {config['base_branch']}", + ) + return config["base_branch"] + except Exception as e: + debug_warning(MODULE, f"Failed to read worktree config: {e}") + + # Strategy 2: Find which branch has the closest merge-base + # Check common branches: develop, main, master + spec_branch = f"auto-claude/{spec_name}" + candidate_branches = ["develop", "main", "master"] + + best_branch = None + best_commits_behind = float("inf") + + for branch in candidate_branches: + try: + # Check if branch exists + check = subprocess.run( + ["git", "rev-parse", "--verify", branch], + cwd=project_dir, + capture_output=True, + text=True, + ) + if check.returncode != 0: + continue + + # Get merge base + merge_base_result = subprocess.run( + ["git", "merge-base", branch, spec_branch], + cwd=project_dir, + capture_output=True, + text=True, + ) + if merge_base_result.returncode != 0: + continue + + merge_base = merge_base_result.stdout.strip() + + # Count commits between merge-base and branch tip + # The branch with fewer commits ahead is likely the one we branched from + ahead_result = subprocess.run( + ["git", "rev-list", "--count", f"{merge_base}..{branch}"], + cwd=project_dir, + capture_output=True, + text=True, + ) + if ahead_result.returncode == 0: + commits_ahead = int(ahead_result.stdout.strip()) + debug( + MODULE, + f"Branch {branch} is {commits_ahead} commits ahead of merge-base", + ) + if commits_ahead < best_commits_behind: + best_commits_behind = commits_ahead + best_branch = branch + except Exception as e: + debug_warning(MODULE, f"Error checking branch {branch}: {e}") + continue + + if best_branch: + debug( + MODULE, + f"Detected base branch from git history: {best_branch} (commits ahead: {best_commits_behind})", + ) + return best_branch + + return None + + +def _detect_parallel_task_conflicts( + project_dir: Path, + current_task_id: str, + current_task_files: list[str], +) -> list[dict]: + """ + Detect potential conflicts between this task and other active tasks. + + Uses existing evolution data to check if any of this task's files + have been modified by other active tasks. This is a lightweight check + that doesn't require re-processing all files. 
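The overlap test itself reduces to a set intersection between this task's changed files and the files other active tasks have recorded. A small illustration of that shape, where `other_task_files` stands in for what `get_files_modified_by_tasks()` returns below:

```python
def find_parallel_conflicts(
    current_task: str,
    current_files: list[str],
    other_task_files: dict[str, list[str]],
) -> list[dict]:
    """Return one conflict record per file touched by this task and by another active task."""
    current = set(current_files)
    conflicts = []
    for file_path, tasks in other_task_files.items():
        if file_path in current:
            conflicts.append({"file": file_path, "tasks": [current_task] + tasks})
    return conflicts
```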
+ + Args: + project_dir: Project root directory + current_task_id: ID of the current task + current_task_files: Files modified by this task (from git diff) + + Returns: + List of conflict dictionaries with 'file' and 'tasks' keys + """ + try: + from merge import MergeOrchestrator + + # Initialize orchestrator just to access evolution data + orchestrator = MergeOrchestrator( + project_dir, + enable_ai=False, + dry_run=True, + ) + + # Get all active tasks from evolution data + active_tasks = orchestrator.evolution_tracker.get_active_tasks() + + # Remove current task from active tasks + other_active_tasks = active_tasks - {current_task_id} + + if not other_active_tasks: + return [] + + # Convert current task files to a set for fast lookup + current_files_set = set(current_task_files) + + # Get files modified by other active tasks + conflicts = [] + other_task_files = orchestrator.evolution_tracker.get_files_modified_by_tasks( + list(other_active_tasks) + ) + + # Find intersection - files modified by both this task and other tasks + for file_path, tasks in other_task_files.items(): + if file_path in current_files_set: + # This file was modified by both current task and other task(s) + all_tasks = [current_task_id] + tasks + conflicts.append({"file": file_path, "tasks": all_tasks}) + + return conflicts + + except Exception as e: + # If anything fails, just return empty - parallel task detection is optional + debug_warning( + "workspace_commands", + f"Parallel task conflict detection failed: {e}", + ) + return [] + + # Import debug utilities try: from debug import ( @@ -352,7 +535,9 @@ def handle_cleanup_worktrees_command(project_dir: Path) -> None: cleanup_all_worktrees(project_dir, confirm=True) -def _check_git_merge_conflicts(project_dir: Path, spec_name: str) -> dict: +def _check_git_merge_conflicts( + project_dir: Path, spec_name: str, base_branch: str | None = None +) -> dict: """ Check for git-level merge conflicts WITHOUT modifying the working directory. 
@@ -362,6 +547,7 @@ def _check_git_merge_conflicts(project_dir: Path, spec_name: str) -> dict: Args: project_dir: Project root directory spec_name: Name of the spec + base_branch: Branch the task was created from (default: auto-detect) Returns: Dictionary with git conflict information: @@ -380,21 +566,25 @@ def _check_git_merge_conflicts(project_dir: Path, spec_name: str) -> dict: "has_conflicts": False, "conflicting_files": [], "needs_rebase": False, - "base_branch": "main", + "base_branch": base_branch or "main", "spec_branch": spec_branch, "commits_behind": 0, } try: - # Get the current branch (base branch) - base_result = subprocess.run( - ["git", "rev-parse", "--abbrev-ref", "HEAD"], - cwd=project_dir, - capture_output=True, - text=True, - ) - if base_result.returncode == 0: - result["base_branch"] = base_result.stdout.strip() + # Use provided base_branch, or detect from current HEAD + if not base_branch: + base_result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + cwd=project_dir, + capture_output=True, + text=True, + ) + if base_result.returncode == 0: + result["base_branch"] = base_result.stdout.strip() + else: + result["base_branch"] = base_branch + debug(MODULE, f"Using provided base branch: {base_branch}") # Get the merge base commit merge_base_result = subprocess.run( @@ -553,7 +743,6 @@ def handle_merge_preview_command( spec_name=spec_name, ) - from merge import MergeOrchestrator from workspace import get_existing_build_worktree worktree_path = get_existing_build_worktree(project_dir, spec_name) @@ -580,16 +769,32 @@ def handle_merge_preview_command( } try: - # First, check for git-level conflicts (diverged branches) - git_conflicts = _check_git_merge_conflicts(project_dir, spec_name) - # Determine the task's source branch (where the task was created from) - # Use provided base_branch (from task metadata), or fall back to detected default + # Priority: + # 1. Provided base_branch (from task metadata) + # 2. Detect from worktree's git history (find which branch it diverged from) + # 3. Fall back to default branch detection (main/master) task_source_branch = base_branch if not task_source_branch: - # Auto-detect the default branch (main/master) that worktrees are typically created from + # Try to detect from worktree's git history + task_source_branch = _detect_worktree_base_branch( + project_dir, worktree_path, spec_name + ) + if not task_source_branch: + # Fall back to auto-detecting main/master task_source_branch = _detect_default_branch(project_dir) + debug( + MODULE, + f"Using task source branch: {task_source_branch}", + provided=base_branch is not None, + ) + + # Check for git-level conflicts (diverged branches) using the task's source branch + git_conflicts = _check_git_merge_conflicts( + project_dir, spec_name, base_branch=task_source_branch + ) + # Get actual changed files from git diff (this is the authoritative count) all_changed_files = _get_changed_files_from_git( worktree_path, task_source_branch @@ -600,49 +805,39 @@ def handle_merge_preview_command( changed_files=all_changed_files[:10], # Log first 10 ) - debug(MODULE, "Initializing MergeOrchestrator for preview...") + # OPTIMIZATION: Skip expensive refresh_from_git() and preview_merge() calls + # For merge-preview, we only need to detect: + # 1. Git conflicts (task vs base branch) - already calculated in _check_git_merge_conflicts() + # 2. 
Parallel task conflicts (this task vs other active tasks) + # + # For parallel task detection, we just check if this task's files overlap + # with files OTHER tasks have already recorded - no need to re-process all files. - # Initialize the orchestrator - orchestrator = MergeOrchestrator( - project_dir, - enable_ai=False, # Don't use AI for preview - dry_run=True, # Don't write anything - ) + debug(MODULE, "Checking for parallel task conflicts (lightweight)...") - # Refresh evolution data from the worktree - # Compare against the task's source branch (where the task was created from) + # Check for parallel task conflicts by looking at existing evolution data + parallel_conflicts = _detect_parallel_task_conflicts( + project_dir, spec_name, all_changed_files + ) debug( MODULE, - f"Refreshing evolution data from worktree: {worktree_path}", - task_source_branch=task_source_branch, - ) - orchestrator.evolution_tracker.refresh_from_git( - spec_name, worktree_path, target_branch=task_source_branch + f"Parallel task conflicts detected: {len(parallel_conflicts)}", + conflicts=parallel_conflicts[:5] if parallel_conflicts else [], ) - # Get merge preview (semantic conflicts between parallel tasks) - debug(MODULE, "Generating merge preview...") - preview = orchestrator.preview_merge([spec_name]) - - # Transform semantic conflicts to UI-friendly format + # Build conflict list - start with parallel task conflicts conflicts = [] - for c in preview.get("conflicts", []): - debug_verbose( - MODULE, - "Processing semantic conflict", - file=c.get("file", ""), - severity=c.get("severity", "unknown"), - ) + for pc in parallel_conflicts: conflicts.append( { - "file": c.get("file", ""), - "location": c.get("location", ""), - "tasks": c.get("tasks", []), - "severity": c.get("severity", "unknown"), - "canAutoMerge": c.get("can_auto_merge", False), - "strategy": c.get("strategy"), - "reason": c.get("reason", ""), - "type": "semantic", + "file": pc["file"], + "location": "file-level", + "tasks": pc["tasks"], + "severity": "medium", + "canAutoMerge": False, + "strategy": None, + "reason": f"File modified by multiple active tasks: {', '.join(pc['tasks'])}", + "type": "parallel", } ) @@ -669,13 +864,14 @@ def handle_merge_preview_command( } ) - summary = preview.get("summary", {}) # Count only non-lock-file conflicts git_conflict_count = len(git_conflicts.get("conflicting_files", [])) - len( lock_files_excluded ) - total_conflicts = summary.get("total_conflicts", 0) + git_conflict_count - conflict_files = summary.get("conflict_files", 0) + git_conflict_count + # Calculate totals from our conflict lists (git conflicts + parallel conflicts) + parallel_conflict_count = len(parallel_conflicts) + total_conflicts = git_conflict_count + parallel_conflict_count + conflict_files = git_conflict_count + parallel_conflict_count # Filter lock files from the git conflicts list for the response non_lock_conflicting_files = [ @@ -761,7 +957,7 @@ def handle_merge_preview_command( "totalFiles": total_files_from_git, "conflictFiles": conflict_files, "totalConflicts": total_conflicts, - "autoMergeable": summary.get("auto_mergeable", 0), + "autoMergeable": 0, # Not tracking auto-merge in lightweight mode "hasGitConflicts": git_conflicts["has_conflicts"] and len(non_lock_conflicting_files) > 0, # Include path-mapped AI merge count for UI display @@ -776,10 +972,9 @@ def handle_merge_preview_command( "Merge preview complete", total_files=result["summary"]["totalFiles"], total_files_source="git_diff", - 
semantic_tracked_files=summary.get("total_files", 0), total_conflicts=result["summary"]["totalConflicts"], has_git_conflicts=git_conflicts["has_conflicts"], - auto_mergeable=result["summary"]["autoMergeable"], + parallel_conflicts=parallel_conflict_count, path_mapped_ai_merges=len(path_mapped_ai_merges), total_renames=len(path_mappings), ) @@ -805,3 +1000,109 @@ def handle_merge_preview_command( "pathMappedAIMergeCount": 0, }, } + + +def cleanup_old_worktrees_command( + project_dir: Path, days: int = 30, dry_run: bool = False +) -> dict: + """ + Clean up old worktrees that haven't been modified in the specified number of days. + + Args: + project_dir: Project root directory + days: Number of days threshold (default: 30) + dry_run: If True, only show what would be removed (default: False) + + Returns: + Dictionary with cleanup results + """ + try: + manager = WorktreeManager(project_dir) + + removed, failed = manager.cleanup_old_worktrees( + days_threshold=days, dry_run=dry_run + ) + + return { + "success": True, + "removed": removed, + "failed": failed, + "dry_run": dry_run, + "days_threshold": days, + } + + except Exception as e: + return { + "success": False, + "error": str(e), + "removed": [], + "failed": [], + } + + +def worktree_summary_command(project_dir: Path) -> dict: + """ + Get a summary of all worktrees with age information. + + Args: + project_dir: Project root directory + + Returns: + Dictionary with worktree summary data + """ + try: + manager = WorktreeManager(project_dir) + + # Print to console for CLI usage + manager.print_worktree_summary() + + # Also return data for programmatic access + worktrees = manager.list_all_worktrees() + warning = manager.get_worktree_count_warning() + + # Categorize by age + recent = [] + week_old = [] + month_old = [] + very_old = [] + unknown_age = [] + + for info in worktrees: + data = { + "spec_name": info.spec_name, + "days_since_last_commit": info.days_since_last_commit, + "commit_count": info.commit_count, + } + + if info.days_since_last_commit is None: + unknown_age.append(data) + elif info.days_since_last_commit < 7: + recent.append(data) + elif info.days_since_last_commit < 30: + week_old.append(data) + elif info.days_since_last_commit < 90: + month_old.append(data) + else: + very_old.append(data) + + return { + "success": True, + "total_worktrees": len(worktrees), + "categories": { + "recent": recent, + "week_old": week_old, + "month_old": month_old, + "very_old": very_old, + "unknown_age": unknown_age, + }, + "warning": warning, + } + + except Exception as e: + return { + "success": False, + "error": str(e), + "total_worktrees": 0, + "categories": {}, + "warning": None, + } diff --git a/apps/backend/commit_message.py b/apps/backend/commit_message.py index 0518f20fba..b90242590c 100644 --- a/apps/backend/commit_message.py +++ b/apps/backend/commit_message.py @@ -231,7 +231,9 @@ async def _call_claude(prompt: str) -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text logger.info(f"Generated commit message: {len(response_text)} chars") diff --git a/apps/backend/core/agent.py b/apps/backend/core/agent.py index 8b2cc8d540..6d9ffe3702 100644 --- a/apps/backend/core/agent.py +++ b/apps/backend/core/agent.py @@ -39,7 +39,7 @@ run_followup_planner, 
save_session_memory, save_session_to_graphiti, - sync_plan_to_source, + sync_spec_to_source, ) # Ensure all exports are available at module level @@ -57,7 +57,7 @@ "load_implementation_plan", "find_subtask_in_plan", "find_phase_for_subtask", - "sync_plan_to_source", + "sync_spec_to_source", "AUTO_CONTINUE_DELAY_SECONDS", "HUMAN_INTERVENTION_FILE", ] diff --git a/apps/backend/core/auth.py b/apps/backend/core/auth.py index be105e1ff9..ce105a0caf 100644 --- a/apps/backend/core/auth.py +++ b/apps/backend/core/auth.py @@ -23,12 +23,21 @@ # Environment variables to pass through to SDK subprocess # NOTE: ANTHROPIC_API_KEY is intentionally excluded to prevent silent API billing SDK_ENV_VARS = [ + # API endpoint configuration "ANTHROPIC_BASE_URL", "ANTHROPIC_AUTH_TOKEN", + # Model overrides (from API Profile custom model mappings) + "ANTHROPIC_MODEL", + "ANTHROPIC_DEFAULT_HAIKU_MODEL", + "ANTHROPIC_DEFAULT_SONNET_MODEL", + "ANTHROPIC_DEFAULT_OPUS_MODEL", + # SDK behavior configuration "NO_PROXY", "DISABLE_TELEMETRY", "DISABLE_COST_WARNINGS", "API_TIMEOUT_MS", + # Windows-specific: Git Bash path for Claude Code CLI + "CLAUDE_CODE_GIT_BASH_PATH", ] @@ -208,6 +217,85 @@ def require_auth_token() -> str: return token +def _find_git_bash_path() -> str | None: + """ + Find git-bash (bash.exe) path on Windows. + + Uses 'where git' to find git.exe, then derives bash.exe location from it. + Git for Windows installs bash.exe in the 'bin' directory alongside git.exe + or in the parent 'bin' directory when git.exe is in 'cmd'. + + Returns: + Full path to bash.exe if found, None otherwise + """ + if platform.system() != "Windows": + return None + + # If already set in environment, use that + existing = os.environ.get("CLAUDE_CODE_GIT_BASH_PATH") + if existing and os.path.exists(existing): + return existing + + git_path = None + + # Method 1: Use 'where' command to find git.exe + try: + # Use where.exe explicitly for reliability + result = subprocess.run( + ["where.exe", "git"], + capture_output=True, + text=True, + timeout=5, + shell=False, + ) + + if result.returncode == 0 and result.stdout.strip(): + git_paths = result.stdout.strip().splitlines() + if git_paths: + git_path = git_paths[0].strip() + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError): + # Intentionally suppress errors - best-effort detection with fallback to common paths + pass + + # Method 2: Check common installation paths if 'where' didn't work + if not git_path: + common_git_paths = [ + os.path.expandvars(r"%PROGRAMFILES%\Git\cmd\git.exe"), + os.path.expandvars(r"%PROGRAMFILES%\Git\bin\git.exe"), + os.path.expandvars(r"%PROGRAMFILES(X86)%\Git\cmd\git.exe"), + os.path.expandvars(r"%LOCALAPPDATA%\Programs\Git\cmd\git.exe"), + ] + for path in common_git_paths: + if os.path.exists(path): + git_path = path + break + + if not git_path: + return None + + # Derive bash.exe location from git.exe location + # Git for Windows structure: + # C:\...\Git\cmd\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe + # C:\...\Git\bin\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe + # C:\...\Git\mingw64\bin\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe + git_dir = os.path.dirname(git_path) + git_parent = os.path.dirname(git_dir) + git_grandparent = os.path.dirname(git_parent) + + # Check common bash.exe locations relative to git installation + possible_bash_paths = [ + os.path.join(git_parent, "bin", "bash.exe"), # cmd -> bin + os.path.join(git_dir, "bash.exe"), # If git.exe is in bin + os.path.join(git_grandparent, "bin", 
"bash.exe"), # mingw64/bin -> bin + ] + + for bash_path in possible_bash_paths: + if os.path.exists(bash_path): + return bash_path + + return None + + def get_sdk_env_vars() -> dict[str, str]: """ Get environment variables to pass to SDK. @@ -215,6 +303,8 @@ def get_sdk_env_vars() -> dict[str, str]: Collects relevant env vars (ANTHROPIC_BASE_URL, etc.) that should be passed through to the claude-agent-sdk subprocess. + On Windows, auto-detects CLAUDE_CODE_GIT_BASH_PATH if not already set. + Returns: Dict of env var name -> value for non-empty vars """ @@ -223,6 +313,14 @@ def get_sdk_env_vars() -> dict[str, str]: value = os.environ.get(var) if value: env[var] = value + + # On Windows, auto-detect git-bash path if not already set + # Claude Code CLI requires bash.exe to run on Windows + if platform.system() == "Windows" and "CLAUDE_CODE_GIT_BASH_PATH" not in env: + bash_path = _find_git_bash_path() + if bash_path: + env["CLAUDE_CODE_GIT_BASH_PATH"] = bash_path + return env diff --git a/apps/backend/core/client.py b/apps/backend/core/client.py index 3d8dbe8de6..761cf2cf72 100644 --- a/apps/backend/core/client.py +++ b/apps/backend/core/client.py @@ -16,6 +16,7 @@ import json import logging import os +import platform import threading import time from pathlib import Path @@ -488,6 +489,12 @@ def create_client( # Collect env vars to pass to SDK (ANTHROPIC_BASE_URL, etc.) sdk_env = get_sdk_env_vars() + # Debug: Log git-bash path detection on Windows + if "CLAUDE_CODE_GIT_BASH_PATH" in sdk_env: + logger.info(f"Git Bash path found: {sdk_env['CLAUDE_CODE_GIT_BASH_PATH']}") + elif platform.system() == "Windows": + logger.warning("Git Bash path not detected on Windows!") + # Check if Linear integration is enabled linear_enabled = is_linear_enabled() linear_api_key = os.environ.get("LINEAR_API_KEY", "") @@ -538,6 +545,48 @@ def create_client( # cases where Claude uses absolute paths for file operations project_path_str = str(project_dir.resolve()) spec_path_str = str(spec_dir.resolve()) + + # Detect if we're running in a worktree and get the original project directory + # Worktrees are located in either: + # - .auto-claude/worktrees/tasks/{spec-name}/ (new location) + # - .worktrees/{spec-name}/ (legacy location) + # When running in a worktree, we need to allow access to both the worktree + # and the original project's .auto-claude/ directory for spec files + original_project_permissions = [] + resolved_project_path = project_dir.resolve() + + # Check for worktree paths and extract original project directory + # This handles spec worktrees, PR review worktrees, and legacy worktrees + # Note: Windows paths are normalized to forward slashes before comparison + worktree_markers = [ + "/.auto-claude/worktrees/tasks/", # Spec/task worktrees + "/.auto-claude/github/pr/worktrees/", # PR review worktrees + "/.worktrees/", # Legacy worktree location + ] + project_path_posix = str(resolved_project_path).replace("\\", "/") + + for marker in worktree_markers: + if marker in project_path_posix: + # Extract the original project directory (parent of worktree location) + # Use rsplit to get the rightmost occurrence (handles nested projects) + original_project_str = project_path_posix.rsplit(marker, 1)[0] + original_project_dir = Path(original_project_str) + + # Grant permissions for relevant directories in the original project + permission_ops = ["Read", "Write", "Edit", "Glob", "Grep"] + dirs_to_permit = [ + original_project_dir / ".auto-claude", + original_project_dir / ".worktrees", # Legacy support + ] + + for 
dir_path in dirs_to_permit: + if dir_path.exists(): + path_str = str(dir_path.resolve()) + original_project_permissions.extend( + [f"{op}({path_str}/**)" for op in permission_ops] + ) + break + security_settings = { "sandbox": {"enabled": True, "autoAllowBashIfSandboxed": True}, "permissions": { @@ -560,6 +609,9 @@ def create_client( f"Read({spec_path_str}/**)", f"Write({spec_path_str}/**)", f"Edit({spec_path_str}/**)", + # Allow original project's .auto-claude/ and .worktrees/ directories + # when running in a worktree (fixes issue #385 - permission errors) + *original_project_permissions, # Bash permission granted here, but actual commands are validated # by the bash_security_hook (see security.py for allowed commands) "Bash(*)", @@ -596,6 +648,8 @@ def create_client( print(f"Security settings: {settings_file}") print(" - Sandbox enabled (OS-level bash isolation)") print(f" - Filesystem restricted to: {project_dir.resolve()}") + if original_project_permissions: + print(" - Worktree permissions: granted for original project directories") print(" - Bash commands restricted to allowlist") if max_thinking_tokens: print(f" - Extended thinking: {max_thinking_tokens:,} tokens") @@ -742,6 +796,9 @@ def create_client( "settings": str(settings_file.resolve()), "env": sdk_env, # Pass ANTHROPIC_BASE_URL etc. to subprocess "max_thinking_tokens": max_thinking_tokens, # Extended thinking budget + # Enable file checkpointing to track file read/write state across tool calls + # This prevents "File has not been read yet" errors in recovery sessions + "enable_file_checkpointing": True, } # Add structured output format if specified diff --git a/apps/backend/core/dependency_validator.py b/apps/backend/core/dependency_validator.py new file mode 100644 index 0000000000..8517cb3631 --- /dev/null +++ b/apps/backend/core/dependency_validator.py @@ -0,0 +1,50 @@ +""" +Dependency Validator +==================== + +Validates platform-specific dependencies are installed before running agents. +""" + +import sys +from pathlib import Path + + +def validate_platform_dependencies() -> None: + """ + Validate that platform-specific dependencies are installed. + + Raises: + SystemExit: If required platform-specific dependencies are missing, + with helpful installation instructions. + """ + # Check Windows-specific dependencies + if sys.platform == "win32" and sys.version_info >= (3, 12): + try: + import pywintypes # noqa: F401 + except ImportError: + _exit_with_pywin32_error() + + +def _exit_with_pywin32_error() -> None: + """Exit with helpful error message for missing pywin32.""" + # Use sys.prefix to detect the virtual environment path + # This works for venv and poetry environments + venv_activate = Path(sys.prefix) / "Scripts" / "activate" + + sys.exit( + "Error: Required Windows dependency 'pywin32' is not installed.\n" + "\n" + "Auto Claude requires pywin32 on Windows for LadybugDB/Graphiti memory integration.\n" + "\n" + "To fix this:\n" + "1. Activate your virtual environment:\n" + f" {venv_activate}\n" + "\n" + "2. 
Install pywin32:\n" + " pip install pywin32>=306\n" + "\n" + " Or reinstall all dependencies:\n" + " pip install -r requirements.txt\n" + "\n" + f"Current Python: {sys.executable}\n" + ) diff --git a/apps/backend/core/git_executable.py b/apps/backend/core/git_executable.py new file mode 100644 index 0000000000..d17a3e07ef --- /dev/null +++ b/apps/backend/core/git_executable.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +""" +Git Executable Finder +====================== + +Utility to find the git executable, with Windows-specific fallbacks. +Separated into its own module to avoid circular imports. +""" + +import os +import shutil +import subprocess +from pathlib import Path + +_cached_git_path: str | None = None + + +def get_git_executable() -> str: + """Find the git executable, with Windows-specific fallbacks. + + Returns the path to git executable. On Windows, checks multiple sources: + 1. CLAUDE_CODE_GIT_BASH_PATH env var (set by Electron frontend) + 2. shutil.which (if git is in PATH) + 3. Common installation locations + 4. Windows 'where' command + + Caches the result after first successful find. + """ + global _cached_git_path + + # Return cached result if available + if _cached_git_path is not None: + return _cached_git_path + + git_path = _find_git_executable() + _cached_git_path = git_path + return git_path + + +def _find_git_executable() -> str: + """Internal function to find git executable.""" + # 1. Check CLAUDE_CODE_GIT_BASH_PATH (set by Electron frontend) + # This env var points to bash.exe, we can derive git.exe from it + bash_path = os.environ.get("CLAUDE_CODE_GIT_BASH_PATH") + if bash_path: + try: + bash_path_obj = Path(bash_path) + if bash_path_obj.exists(): + git_dir = bash_path_obj.parent.parent + # Try cmd/git.exe first (preferred), then bin/git.exe + for git_subpath in ["cmd/git.exe", "bin/git.exe"]: + git_path = git_dir / git_subpath + if git_path.is_file(): + return str(git_path) + except (OSError, ValueError): + pass + + # 2. Try shutil.which (works if git is in PATH) + git_path = shutil.which("git") + if git_path: + return git_path + + # 3. Windows-specific: check common installation locations + if os.name == "nt": + common_paths = [ + os.path.expandvars(r"%PROGRAMFILES%\Git\cmd\git.exe"), + os.path.expandvars(r"%PROGRAMFILES%\Git\bin\git.exe"), + os.path.expandvars(r"%PROGRAMFILES(X86)%\Git\cmd\git.exe"), + os.path.expandvars(r"%LOCALAPPDATA%\Programs\Git\cmd\git.exe"), + r"C:\Program Files\Git\cmd\git.exe", + r"C:\Program Files (x86)\Git\cmd\git.exe", + ] + for path in common_paths: + try: + if os.path.isfile(path): + return path + except OSError: + continue + + # 4. Try 'where' command with shell=True (more reliable on Windows) + try: + result = subprocess.run( + "where git", + capture_output=True, + text=True, + timeout=5, + shell=True, + ) + if result.returncode == 0 and result.stdout.strip(): + found_path = result.stdout.strip().split("\n")[0].strip() + if found_path and os.path.isfile(found_path): + return found_path + except (subprocess.TimeoutExpired, OSError): + pass + + # Default fallback - let subprocess handle it (may fail) + return "git" + + +def run_git( + args: list[str], + cwd: Path | str | None = None, + timeout: int = 60, + input_data: str | None = None, +) -> subprocess.CompletedProcess: + """Run a git command with proper executable finding. 
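A short usage sketch of `run_git`, mirroring the migrated `get_latest_commit()` in `agents/utils.py`: callers need no try/except, because timeouts and a missing git binary come back as a `CompletedProcess` with a non-zero return code rather than an exception:

```python
from pathlib import Path

from core.git_executable import run_git


def latest_commit(project_dir: Path) -> str | None:
    """Return the current HEAD hash, or None if git is unavailable or the call fails."""
    result = run_git(["rev-parse", "HEAD"], cwd=project_dir, timeout=10)
    if result.returncode == 0:
        return result.stdout.strip()
    # Timeout or missing executable: returncode is -1 and stderr explains why
    return None
```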
+ + Args: + args: Git command arguments (without 'git' prefix) + cwd: Working directory for the command + timeout: Command timeout in seconds (default: 60) + input_data: Optional string data to pass to stdin + + Returns: + CompletedProcess with command results. + """ + git = get_git_executable() + try: + return subprocess.run( + [git] + args, + cwd=cwd, + input=input_data, + capture_output=True, + text=True, + encoding="utf-8", + errors="replace", + timeout=timeout, + ) + except subprocess.TimeoutExpired: + return subprocess.CompletedProcess( + args=[git] + args, + returncode=-1, + stdout="", + stderr=f"Command timed out after {timeout} seconds", + ) + except FileNotFoundError: + return subprocess.CompletedProcess( + args=[git] + args, + returncode=-1, + stdout="", + stderr="Git executable not found. Please ensure git is installed and in PATH.", + ) diff --git a/apps/backend/core/phase_event.py b/apps/backend/core/phase_event.py index a86321cf02..acc034605b 100644 --- a/apps/backend/core/phase_event.py +++ b/apps/backend/core/phase_event.py @@ -52,4 +52,8 @@ def emit_phase( print(f"{PHASE_MARKER_PREFIX}{json.dumps(payload, default=str)}", flush=True) except (OSError, UnicodeEncodeError) as e: if _DEBUG: - print(f"[phase_event] emit failed: {e}", file=sys.stderr, flush=True) + try: + sys.stderr.write(f"[phase_event] emit failed: {e}\n") + sys.stderr.flush() + except (OSError, UnicodeEncodeError): + pass # Truly silent on complete I/O failure diff --git a/apps/backend/core/workspace.py b/apps/backend/core/workspace.py index ddfd49059b..6ae292ab6b 100644 --- a/apps/backend/core/workspace.py +++ b/apps/backend/core/workspace.py @@ -4,7 +4,7 @@ ============================================= Handles workspace isolation through Git worktrees, where each spec -gets its own isolated worktree in .worktrees/{spec-name}/. +gets its own isolated worktree in .auto-claude/worktrees/tasks/{spec-name}/. This module has been refactored for better maintainability: - Models and enums: workspace/models.py @@ -90,12 +90,18 @@ def is_debug_enabled(): from core.workspace.git_utils import ( detect_file_renames as _detect_file_renames, ) +from core.workspace.git_utils import ( + get_binary_file_content_from_ref as _get_binary_file_content_from_ref, +) from core.workspace.git_utils import ( get_changed_files_from_branch as _get_changed_files_from_branch, ) from core.workspace.git_utils import ( get_file_content_from_ref as _get_file_content_from_ref, ) +from core.workspace.git_utils import ( + is_binary_file as _is_binary_file, +) from core.workspace.git_utils import ( is_lock_file as _is_lock_file, ) @@ -239,14 +245,16 @@ def merge_existing_build( if smart_result is not None: # Smart merge handled it (success or identified conflicts) if smart_result.get("success"): - # Check if smart merge resolved git conflicts or path-mapped files + # Check if smart merge actually DID work (resolved conflicts via AI) + # NOTE: "files_merged" in stats is misleading - it's "files TO merge" not "files WERE merged" + # The smart merge preview returns this count but doesn't actually perform the merge + # in the no-conflict path. We only skip git merge if AI actually did work. 
stats = smart_result.get("stats", {}) had_conflicts = stats.get("conflicts_resolved", 0) > 0 - files_merged = stats.get("files_merged", 0) > 0 ai_assisted = stats.get("ai_assisted", 0) > 0 - if had_conflicts or files_merged or ai_assisted: - # Git conflicts were resolved OR path-mapped files were AI merged + if had_conflicts or ai_assisted: + # AI actually resolved conflicts or assisted with merges # Changes are already written and staged - no need for git merge _print_merge_success( no_commit, stats, spec_name=spec_name, keep_worktree=True @@ -258,7 +266,8 @@ def merge_existing_build( return True else: - # No conflicts and no files merged - do standard git merge + # No conflicts needed AI resolution - do standard git merge + # This is the common case: no divergence, just need to merge changes success_result = manager.merge_worktree( spec_name, delete_after=False, no_commit=no_commit ) @@ -773,28 +782,44 @@ def _resolve_git_conflicts_with_ai( print(muted(f" Copying {len(new_files)} new file(s) first (dependencies)...")) for file_path, status in new_files: try: - content = _get_file_content_from_ref( - project_dir, spec_branch, file_path - ) - if content is not None: - # Apply path mapping - write to new location if file was renamed - target_file_path = _apply_path_mapping(file_path, path_mappings) - target_path = project_dir / target_file_path - target_path.parent.mkdir(parents=True, exist_ok=True) - target_path.write_text(content, encoding="utf-8") - subprocess.run( - ["git", "add", target_file_path], - cwd=project_dir, - capture_output=True, + # Apply path mapping - write to new location if file was renamed + target_file_path = _apply_path_mapping(file_path, path_mappings) + target_path = project_dir / target_file_path + target_path.parent.mkdir(parents=True, exist_ok=True) + + # Handle binary files differently - use bytes instead of text + if _is_binary_file(file_path): + binary_content = _get_binary_file_content_from_ref( + project_dir, spec_branch, file_path + ) + if binary_content is not None: + target_path.write_bytes(binary_content) + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, + ) + resolved_files.append(target_file_path) + debug(MODULE, f"Copied new binary file: {file_path}") + else: + content = _get_file_content_from_ref( + project_dir, spec_branch, file_path ) - resolved_files.append(target_file_path) - if target_file_path != file_path: - debug( - MODULE, - f"Copied new file with path mapping: {file_path} -> {target_file_path}", + if content is not None: + target_path.write_text(content, encoding="utf-8") + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, ) - else: - debug(MODULE, f"Copied new file: {file_path}") + resolved_files.append(target_file_path) + if target_file_path != file_path: + debug( + MODULE, + f"Copied new file with path mapping: {file_path} -> {target_file_path}", + ) + else: + debug(MODULE, f"Copied new file: {file_path}") except Exception as e: debug_warning(MODULE, f"Could not copy new file {file_path}: {e}") @@ -1118,24 +1143,44 @@ def _resolve_git_conflicts_with_ai( ) else: # Modified without path change - simple copy - content = _get_file_content_from_ref( - project_dir, spec_branch, file_path - ) - if content is not None: - target_path = project_dir / target_file_path - target_path.parent.mkdir(parents=True, exist_ok=True) - target_path.write_text(content, encoding="utf-8") - subprocess.run( - ["git", "add", target_file_path], - cwd=project_dir, - 
capture_output=True, + # Check if binary file to use correct read/write method + target_path = project_dir / target_file_path + target_path.parent.mkdir(parents=True, exist_ok=True) + + if _is_binary_file(file_path): + binary_content = _get_binary_file_content_from_ref( + project_dir, spec_branch, file_path + ) + if binary_content is not None: + target_path.write_bytes(binary_content) + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, + ) + resolved_files.append(target_file_path) + if target_file_path != file_path: + debug( + MODULE, + f"Merged binary with path mapping: {file_path} -> {target_file_path}", + ) + else: + content = _get_file_content_from_ref( + project_dir, spec_branch, file_path ) - resolved_files.append(target_file_path) - if target_file_path != file_path: - debug( - MODULE, - f"Merged with path mapping: {file_path} -> {target_file_path}", + if content is not None: + target_path.write_text(content, encoding="utf-8") + subprocess.run( + ["git", "add", target_file_path], + cwd=project_dir, + capture_output=True, ) + resolved_files.append(target_file_path) + if target_file_path != file_path: + debug( + MODULE, + f"Merged with path mapping: {file_path} -> {target_file_path}", + ) except Exception as e: print(muted(f" Warning: Could not process {file_path}: {e}")) @@ -1431,7 +1476,9 @@ async def _merge_file_with_ai_async( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text if response_text: diff --git a/apps/backend/core/workspace/__init__.py b/apps/backend/core/workspace/__init__.py index e5b5ac711a..db278769ea 100644 --- a/apps/backend/core/workspace/__init__.py +++ b/apps/backend/core/workspace/__init__.py @@ -4,7 +4,7 @@ ============================= Handles workspace isolation through Git worktrees, where each spec -gets its own isolated worktree in .worktrees/{spec-name}/. +gets its own isolated worktree in .auto-claude/worktrees/tasks/{spec-name}/. 
This package provides: - Workspace setup and configuration @@ -62,6 +62,7 @@ MAX_SYNTAX_FIX_RETRIES, MERGE_LOCK_TIMEOUT, _create_conflict_file_with_git, + _get_binary_file_content_from_ref, _get_changed_files_from_branch, _get_file_content_from_ref, _is_binary_file, @@ -70,6 +71,7 @@ _is_process_running, _validate_merged_syntax, create_conflict_file_with_git, + get_binary_file_content_from_ref, get_changed_files_from_branch, get_current_branch, get_existing_build_worktree, @@ -117,6 +119,7 @@ "get_current_branch", "get_existing_build_worktree", "get_file_content_from_ref", + "get_binary_file_content_from_ref", "get_changed_files_from_branch", "is_process_running", "is_binary_file", diff --git a/apps/backend/core/workspace/finalization.py b/apps/backend/core/workspace/finalization.py index 3078f2f8a2..a398391f84 100644 --- a/apps/backend/core/workspace/finalization.py +++ b/apps/backend/core/workspace/finalization.py @@ -169,7 +169,15 @@ def handle_workspace_choice( if staging_path: print(highlight(f" cd {staging_path}")) else: - print(highlight(f" cd {project_dir}/.worktrees/{spec_name}")) + worktree_path = get_existing_build_worktree(project_dir, spec_name) + if worktree_path: + print(highlight(f" cd {worktree_path}")) + else: + print( + highlight( + f" cd {project_dir}/.auto-claude/worktrees/tasks/{spec_name}" + ) + ) # Show likely test/run commands if staging_path: @@ -232,7 +240,15 @@ def handle_workspace_choice( if staging_path: print(highlight(f" cd {staging_path}")) else: - print(highlight(f" cd {project_dir}/.worktrees/{spec_name}")) + worktree_path = get_existing_build_worktree(project_dir, spec_name) + if worktree_path: + print(highlight(f" cd {worktree_path}")) + else: + print( + highlight( + f" cd {project_dir}/.auto-claude/worktrees/tasks/{spec_name}" + ) + ) print() print("When you're ready to add it:") print(highlight(f" python auto-claude/run.py --spec {spec_name} --merge")) diff --git a/apps/backend/core/workspace/git_utils.py b/apps/backend/core/workspace/git_utils.py index c027c4a426..5f6093b2e6 100644 --- a/apps/backend/core/workspace/git_utils.py +++ b/apps/backend/core/workspace/git_utils.py @@ -10,6 +10,45 @@ import subprocess from pathlib import Path +from core.git_executable import get_git_executable, run_git + +__all__ = [ + # Exported helpers + "get_git_executable", + "run_git", + # Constants + "MAX_FILE_LINES_FOR_AI", + "MAX_PARALLEL_AI_MERGES", + "LOCK_FILES", + "BINARY_EXTENSIONS", + "MERGE_LOCK_TIMEOUT", + "MAX_SYNTAX_FIX_RETRIES", + # Functions + "detect_file_renames", + "apply_path_mapping", + "get_merge_base", + "has_uncommitted_changes", + "get_current_branch", + "get_existing_build_worktree", + "get_file_content_from_ref", + "get_binary_file_content_from_ref", + "get_changed_files_from_branch", + "is_process_running", + "is_binary_file", + "is_lock_file", + "validate_merged_syntax", + "create_conflict_file_with_git", + # Backward compat aliases + "_is_process_running", + "_is_binary_file", + "_is_lock_file", + "_validate_merged_syntax", + "_get_file_content_from_ref", + "_get_binary_file_content_from_ref", + "_get_changed_files_from_branch", + "_create_conflict_file_with_git", +] + # Constants for merge limits MAX_FILE_LINES_FOR_AI = 5000 # Skip AI for files larger than this MAX_PARALLEL_AI_MERGES = 5 # Limit concurrent AI merge operations @@ -33,6 +72,7 @@ } BINARY_EXTENSIONS = { + # Images ".png", ".jpg", ".jpeg", @@ -41,6 +81,11 @@ ".webp", ".bmp", ".svg", + ".tiff", + ".tif", + ".heic", + ".heif", + # Documents ".pdf", ".doc", ".docx", @@ -48,32 
+93,63 @@ ".xlsx", ".ppt", ".pptx", + # Archives ".zip", ".tar", ".gz", ".rar", ".7z", + ".bz2", + ".xz", + ".zst", + # Executables and libraries ".exe", ".dll", ".so", ".dylib", ".bin", + ".msi", + ".app", + # WebAssembly + ".wasm", + # Audio ".mp3", - ".mp4", ".wav", + ".ogg", + ".flac", + ".aac", + ".m4a", + # Video + ".mp4", ".avi", ".mov", ".mkv", + ".webm", + ".wmv", + ".flv", + # Fonts ".woff", ".woff2", ".ttf", ".otf", ".eot", + # Compiled code ".pyc", ".pyo", ".class", ".o", ".obj", + # Data files + ".dat", + ".db", + ".sqlite", + ".sqlite3", + # Other binary formats + ".cur", + ".ani", + ".pbm", + ".pgm", + ".ppm", } # Merge lock timeout in seconds @@ -113,9 +189,8 @@ def detect_file_renames( # -M flag enables rename detection # --diff-filter=R shows only renames # --name-status shows status and file names - result = subprocess.run( + result = run_git( [ - "git", "log", "--name-status", "-M", @@ -124,8 +199,6 @@ def detect_file_renames( f"{from_ref}..{to_ref}", ], cwd=project_dir, - capture_output=True, - text=True, ) if result.returncode == 0: @@ -175,39 +248,21 @@ def get_merge_base(project_dir: Path, ref1: str, ref2: str) -> str | None: Returns: Merge-base commit hash, or None if not found """ - try: - result = subprocess.run( - ["git", "merge-base", ref1, ref2], - cwd=project_dir, - capture_output=True, - text=True, - ) - if result.returncode == 0: - return result.stdout.strip() - except Exception: - pass + result = run_git(["merge-base", ref1, ref2], cwd=project_dir) + if result.returncode == 0: + return result.stdout.strip() return None def has_uncommitted_changes(project_dir: Path) -> bool: """Check if user has unsaved work.""" - result = subprocess.run( - ["git", "status", "--porcelain"], - cwd=project_dir, - capture_output=True, - text=True, - ) + result = run_git(["status", "--porcelain"], cwd=project_dir) return bool(result.stdout.strip()) def get_current_branch(project_dir: Path) -> str: """Get the current branch name.""" - result = subprocess.run( - ["git", "rev-parse", "--abbrev-ref", "HEAD"], - cwd=project_dir, - capture_output=True, - text=True, - ) + result = run_git(["rev-parse", "--abbrev-ref", "HEAD"], cwd=project_dir) return result.stdout.strip() @@ -222,10 +277,16 @@ def get_existing_build_worktree(project_dir: Path, spec_name: str) -> Path | Non Returns: Path to the worktree if it exists for this spec, None otherwise """ - # Per-spec worktree path: .worktrees/{spec-name}/ - worktree_path = project_dir / ".worktrees" / spec_name - if worktree_path.exists(): - return worktree_path + # New path first + new_path = project_dir / ".auto-claude" / "worktrees" / "tasks" / spec_name + if new_path.exists(): + return new_path + + # Legacy fallback + legacy_path = project_dir / ".worktrees" / spec_name + if legacy_path.exists(): + return legacy_path + return None @@ -233,11 +294,29 @@ def get_file_content_from_ref( project_dir: Path, ref: str, file_path: str ) -> str | None: """Get file content from a git ref (branch, commit, etc.).""" + result = run_git(["show", f"{ref}:{file_path}"], cwd=project_dir) + if result.returncode == 0: + return result.stdout + return None + + +def get_binary_file_content_from_ref( + project_dir: Path, ref: str, file_path: str +) -> bytes | None: + """Get binary file content from a git ref (branch, commit, etc.). + + Unlike get_file_content_from_ref, this returns raw bytes without + text decoding, suitable for binary files like images, audio, etc. 
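A small usage sketch of how a caller might pick between the text and binary helpers, following the same pattern the conflict-resolution code above uses; the restore_from_ref wrapper is a hypothetical illustration, only the imported names come from this change.

from pathlib import Path

from core.workspace.git_utils import (
    get_binary_file_content_from_ref,
    get_file_content_from_ref,
    is_binary_file,
)


def restore_from_ref(project_dir: Path, ref: str, file_path: str) -> bool:
    # Binary files must round-trip as bytes; text files are written as UTF-8.
    target = project_dir / file_path
    target.parent.mkdir(parents=True, exist_ok=True)
    if is_binary_file(file_path):
        data = get_binary_file_content_from_ref(project_dir, ref, file_path)
        if data is None:
            return False
        target.write_bytes(data)
    else:
        text = get_file_content_from_ref(project_dir, ref, file_path)
        if text is None:
            return False
        target.write_text(text, encoding="utf-8")
    return True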
+ + Note: Uses subprocess directly with get_git_executable() since + run_git() always returns text output. + """ + git = get_git_executable() result = subprocess.run( - ["git", "show", f"{ref}:{file_path}"], + [git, "show", f"{ref}:{file_path}"], cwd=project_dir, capture_output=True, - text=True, + text=False, # Return bytes, not text ) if result.returncode == 0: return result.stdout @@ -262,11 +341,9 @@ def get_changed_files_from_branch( Returns: List of (file_path, status) tuples """ - result = subprocess.run( - ["git", "diff", "--name-status", f"{base_branch}...{spec_branch}"], + result = run_git( + ["diff", "--name-status", f"{base_branch}...{spec_branch}"], cwd=project_dir, - capture_output=True, - text=True, ) files = [] @@ -283,15 +360,23 @@ def get_changed_files_from_branch( return files +def _normalize_path(path: str) -> str: + """Normalize path separators to forward slashes for cross-platform comparison.""" + return path.replace("\\", "/") + + def _is_auto_claude_file(file_path: str) -> bool: - """Check if a file is in the .auto-claude or auto-claude/specs directory.""" - # These patterns cover the internal spec/build files that shouldn't be merged + """Check if a file is in the .auto-claude or auto-claude/specs directory. + + Handles both forward slashes (Unix/Git output) and backslashes (Windows). + """ + normalized = _normalize_path(file_path) excluded_patterns = [ ".auto-claude/", "auto-claude/specs/", ] for pattern in excluded_patterns: - if file_path.startswith(pattern): + if normalized.startswith(pattern): return True return False @@ -485,11 +570,9 @@ def create_conflict_file_with_git( try: # git merge-file # Exit codes: 0 = clean merge, 1 = conflicts, >1 = error - result = subprocess.run( - ["git", "merge-file", "-p", main_path, base_path, wt_path], + result = run_git( + ["merge-file", "-p", main_path, base_path, wt_path], cwd=project_dir, - capture_output=True, - text=True, ) # Read the merged content @@ -516,5 +599,6 @@ def create_conflict_file_with_git( _is_lock_file = is_lock_file _validate_merged_syntax = validate_merged_syntax _get_file_content_from_ref = get_file_content_from_ref +_get_binary_file_content_from_ref = get_binary_file_content_from_ref _get_changed_files_from_branch = get_changed_files_from_branch _create_conflict_file_with_git = create_conflict_file_with_git diff --git a/apps/backend/core/workspace/models.py b/apps/backend/core/workspace/models.py index cc94413e54..92d2178c95 100644 --- a/apps/backend/core/workspace/models.py +++ b/apps/backend/core/workspace/models.py @@ -249,7 +249,7 @@ def get_next_spec_number(self) -> int: max_number = max(max_number, self._scan_specs_dir(main_specs_dir)) # 2. 
Scan all worktree specs - worktrees_dir = self.project_dir / ".worktrees" + worktrees_dir = self.project_dir / ".auto-claude" / "worktrees" / "tasks" if worktrees_dir.exists(): for worktree in worktrees_dir.iterdir(): if worktree.is_dir(): diff --git a/apps/backend/core/workspace/setup.py b/apps/backend/core/workspace/setup.py index b5b825722b..06269e7c1e 100644 --- a/apps/backend/core/workspace/setup.py +++ b/apps/backend/core/workspace/setup.py @@ -8,11 +8,12 @@ import json import shutil -import subprocess import sys from pathlib import Path +from core.git_executable import run_git from merge import FileTimelineTracker +from security.constants import ALLOWLIST_FILENAME, PROFILE_FILENAME from ui import ( Icons, MenuOption, @@ -267,6 +268,43 @@ def setup_workspace( f"Environment files copied: {', '.join(copied_env_files)}", "success" ) + # Copy security configuration files if they exist + # Note: Unlike env files, security files always overwrite to ensure + # the worktree uses the same security rules as the main project. + # This prevents security bypasses through stale worktree configs. + security_files = [ + ALLOWLIST_FILENAME, + PROFILE_FILENAME, + ] + security_files_copied = [] + + for filename in security_files: + source_file = project_dir / filename + if source_file.is_file(): + target_file = worktree_info.path / filename + try: + shutil.copy2(source_file, target_file) + security_files_copied.append(filename) + except (OSError, PermissionError) as e: + debug_warning(MODULE, f"Failed to copy {filename}: {e}") + print_status( + f"Warning: Could not copy {filename} to worktree", "warning" + ) + + if security_files_copied: + print_status( + f"Security config copied: {', '.join(security_files_copied)}", "success" + ) + + # Ensure .auto-claude/ is in the worktree's .gitignore + # This is critical because the worktree inherits .gitignore from the base branch, + # which may not have .auto-claude/ if that change wasn't committed/pushed. + # Without this, spec files would be committed to the worktree's branch. 
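A brief sketch of the idempotent behaviour this relies on, assuming the worktree already inherited a .gitignore from the base branch; the temporary directory is purely illustrative.

import tempfile
from pathlib import Path

from init import ensure_gitignore_entry

with tempfile.TemporaryDirectory() as tmp:
    worktree = Path(tmp)
    (worktree / ".gitignore").write_text("node_modules/\n", encoding="utf-8")
    # First call appends the missing entry (returns True); repeat calls are no-ops.
    assert ensure_gitignore_entry(worktree, ".auto-claude/") is True
    assert ensure_gitignore_entry(worktree, ".auto-claude/") is False
    assert ".auto-claude/" in (worktree / ".gitignore").read_text(encoding="utf-8")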
+ from init import ensure_gitignore_entry + + if ensure_gitignore_entry(worktree_info.path, ".auto-claude/"): + debug(MODULE, "Added .auto-claude/ to worktree's .gitignore") + # Copy spec files to worktree if provided localized_spec_dir = None if source_spec_dir and source_spec_dir.exists(): @@ -368,11 +406,9 @@ def initialize_timeline_tracking( files_to_modify.extend(subtask.get("files", [])) # Get the current branch point commit - result = subprocess.run( - ["git", "rev-parse", "HEAD"], + result = run_git( + ["rev-parse", "HEAD"], cwd=project_dir, - capture_output=True, - text=True, ) branch_point = result.stdout.strip() if result.returncode == 0 else None diff --git a/apps/backend/core/worktree.py b/apps/backend/core/worktree.py index ab3b89e3b3..a8e19498b0 100644 --- a/apps/backend/core/worktree.py +++ b/apps/backend/core/worktree.py @@ -4,7 +4,7 @@ ============================================= Each spec gets its own worktree: -- Worktree path: .worktrees/{spec-name}/ +- Worktree path: .auto-claude/worktrees/tasks/{spec-name}/ - Branch name: auto-claude/{spec-name} This allows: @@ -20,8 +20,11 @@ import shutil import subprocess from dataclasses import dataclass +from datetime import datetime from pathlib import Path +from core.git_executable import run_git + class WorktreeError(Exception): """Error during worktree operations.""" @@ -42,20 +45,22 @@ class WorktreeInfo: files_changed: int = 0 additions: int = 0 deletions: int = 0 + last_commit_date: datetime | None = None + days_since_last_commit: int | None = None class WorktreeManager: """ Manages per-spec Git worktrees. - Each spec gets its own worktree in .worktrees/{spec-name}/ with + Each spec gets its own worktree in .auto-claude/worktrees/tasks/{spec-name}/ with a corresponding branch auto-claude/{spec-name}. """ def __init__(self, project_dir: Path, base_branch: str | None = None): self.project_dir = project_dir self.base_branch = base_branch or self._detect_base_branch() - self.worktrees_dir = project_dir / ".worktrees" + self.worktrees_dir = project_dir / ".auto-claude" / "worktrees" / "tasks" self._merge_lock = asyncio.Lock() def _detect_base_branch(self) -> str: @@ -74,13 +79,9 @@ def _detect_base_branch(self) -> str: env_branch = os.getenv("DEFAULT_BRANCH") if env_branch: # Verify the branch exists - result = subprocess.run( - ["git", "rev-parse", "--verify", env_branch], + result = run_git( + ["rev-parse", "--verify", env_branch], cwd=self.project_dir, - capture_output=True, - text=True, - encoding="utf-8", - errors="replace", ) if result.returncode == 0: return env_branch @@ -91,13 +92,9 @@ def _detect_base_branch(self) -> str: # 2. 
Auto-detect main/master for branch in ["main", "master"]: - result = subprocess.run( - ["git", "rev-parse", "--verify", branch], + result = run_git( + ["rev-parse", "--verify", branch], cwd=self.project_dir, - capture_output=True, - text=True, - encoding="utf-8", - errors="replace", ) if result.returncode == 0: return branch @@ -111,30 +108,29 @@ def _detect_base_branch(self) -> str: def _get_current_branch(self) -> str: """Get the current git branch.""" - result = subprocess.run( - ["git", "rev-parse", "--abbrev-ref", "HEAD"], + result = run_git( + ["rev-parse", "--abbrev-ref", "HEAD"], cwd=self.project_dir, - capture_output=True, - text=True, - encoding="utf-8", - errors="replace", ) if result.returncode != 0: raise WorktreeError(f"Failed to get current branch: {result.stderr}") return result.stdout.strip() def _run_git( - self, args: list[str], cwd: Path | None = None + self, args: list[str], cwd: Path | None = None, timeout: int = 60 ) -> subprocess.CompletedProcess: - """Run a git command and return the result.""" - return subprocess.run( - ["git"] + args, - cwd=cwd or self.project_dir, - capture_output=True, - text=True, - encoding="utf-8", - errors="replace", - ) + """Run a git command and return the result. + + Args: + args: Git command arguments (without 'git' prefix) + cwd: Working directory for the command + timeout: Command timeout in seconds (default: 60) + + Returns: + CompletedProcess with command results. On timeout, returns a + CompletedProcess with returncode=-1 and timeout error in stderr. + """ + return run_git(args, cwd=cwd or self.project_dir, timeout=timeout) def _unstage_gitignored_files(self) -> None: """ @@ -157,14 +153,10 @@ def _unstage_gitignored_files(self) -> None: # 1. Check which staged files are gitignored # git check-ignore returns the files that ARE ignored - result = subprocess.run( - ["git", "check-ignore", "--stdin"], + result = run_git( + ["check-ignore", "--stdin"], cwd=self.project_dir, - input="\n".join(staged_files), - capture_output=True, - text=True, - encoding="utf-8", - errors="replace", + input_data="\n".join(staged_files), ) if result.stdout.strip(): @@ -179,8 +171,10 @@ def _unstage_gitignored_files(self) -> None: file = file.strip() if not file: continue + # Normalize path separators for cross-platform (Windows backslash support) + normalized = file.replace("\\", "/") for pattern in auto_claude_patterns: - if file.startswith(pattern) or f"/{pattern}" in file: + if normalized.startswith(pattern) or f"/{pattern}" in normalized: files_to_unstage.add(file) break @@ -194,13 +188,24 @@ def _unstage_gitignored_files(self) -> None: def setup(self) -> None: """Create worktrees directory if needed.""" - self.worktrees_dir.mkdir(exist_ok=True) + self.worktrees_dir.mkdir(parents=True, exist_ok=True) # ==================== Per-Spec Worktree Methods ==================== def get_worktree_path(self, spec_name: str) -> Path: - """Get the worktree path for a spec.""" - return self.worktrees_dir / spec_name + """Get the worktree path for a spec (checks new and legacy locations).""" + # New path first + new_path = self.worktrees_dir / spec_name + if new_path.exists(): + return new_path + + # Legacy fallback (.worktrees/ instead of .auto-claude/worktrees/tasks/) + legacy_path = self.project_dir / ".worktrees" / spec_name + if legacy_path.exists(): + return legacy_path + + # Return new path as default for creation + return new_path def get_branch_name(self, spec_name: str) -> str: """Get the branch name for a spec.""" @@ -261,6 +266,8 @@ def 
_get_worktree_stats(self, spec_name: str) -> dict: "files_changed": 0, "additions": 0, "deletions": 0, + "last_commit_date": None, + "days_since_last_commit": None, } if not worktree_path.exists(): @@ -273,6 +280,52 @@ def _get_worktree_stats(self, spec_name: str) -> dict: if result.returncode == 0: stats["commit_count"] = int(result.stdout.strip() or "0") + # Last commit date (most recent commit in this worktree) + result = self._run_git( + ["log", "-1", "--format=%cd", "--date=iso"], cwd=worktree_path + ) + if result.returncode == 0 and result.stdout.strip(): + try: + # Parse ISO date format: "2026-01-04 00:25:25 +0100" + date_str = result.stdout.strip() + # Convert git format to ISO format for fromisoformat() + # "2026-01-04 00:25:25 +0100" -> "2026-01-04T00:25:25+01:00" + parts = date_str.rsplit(" ", 1) + if len(parts) == 2: + date_part, tz_part = parts + # Convert timezone format: "+0100" -> "+01:00" + if len(tz_part) == 5 and ( + tz_part.startswith("+") or tz_part.startswith("-") + ): + tz_formatted = f"{tz_part[:3]}:{tz_part[3:]}" + iso_str = f"{date_part.replace(' ', 'T')}{tz_formatted}" + last_commit_date = datetime.fromisoformat(iso_str) + stats["last_commit_date"] = last_commit_date + # Use timezone-aware now() for accurate comparison + now_aware = datetime.now(last_commit_date.tzinfo) + stats["days_since_last_commit"] = ( + now_aware - last_commit_date + ).days + else: + # Fallback for unexpected timezone format + last_commit_date = datetime.strptime( + parts[0], "%Y-%m-%d %H:%M:%S" + ) + stats["last_commit_date"] = last_commit_date + stats["days_since_last_commit"] = ( + datetime.now() - last_commit_date + ).days + else: + # No timezone in output + last_commit_date = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S") + stats["last_commit_date"] = last_commit_date + stats["days_since_last_commit"] = ( + datetime.now() - last_commit_date + ).days + except (ValueError, TypeError) as e: + # If parsing fails, silently continue without date info + pass + # Diff stats result = self._run_git( ["diff", "--shortstat", f"{self.base_branch}...HEAD"], cwd=worktree_path @@ -327,9 +380,33 @@ def create_worktree(self, spec_name: str) -> WorktreeInfo: # Delete branch if it exists (from previous attempt) self._run_git(["branch", "-D", branch_name]) - # Create worktree with new branch from base + # Fetch latest from remote to ensure we have the most up-to-date code + # GitHub/remote is the source of truth, not the local branch + fetch_result = self._run_git(["fetch", "origin", self.base_branch]) + if fetch_result.returncode != 0: + print( + f"Warning: Could not fetch {self.base_branch} from origin: {fetch_result.stderr}" + ) + print("Falling back to local branch...") + + # Determine the start point for the worktree + # Prefer origin/{base_branch} (remote) over local branch to ensure we have latest code + remote_ref = f"origin/{self.base_branch}" + start_point = self.base_branch # Default to local branch + + # Check if remote ref exists and use it as the source of truth + check_remote = self._run_git(["rev-parse", "--verify", remote_ref]) + if check_remote.returncode == 0: + start_point = remote_ref + print(f"Creating worktree from remote: {remote_ref}") + else: + print( + f"Remote ref {remote_ref} not found, using local branch: {self.base_branch}" + ) + + # Create worktree with new branch from the start point (remote preferred) result = self._run_git( - ["worktree", "add", "-b", branch_name, str(worktree_path), self.base_branch] + ["worktree", "add", "-b", branch_name, str(worktree_path), 
start_point] ) if result.returncode != 0: @@ -475,17 +552,27 @@ def commit_in_worktree(self, spec_name: str, message: str) -> bool: # ==================== Listing & Discovery ==================== def list_all_worktrees(self) -> list[WorktreeInfo]: - """List all spec worktrees.""" + """List all spec worktrees (includes legacy .worktrees/ location).""" worktrees = [] - - if not self.worktrees_dir.exists(): - return worktrees - - for item in self.worktrees_dir.iterdir(): - if item.is_dir(): - info = self.get_worktree_info(item.name) - if info: - worktrees.append(info) + seen_specs = set() + + # Check new location first + if self.worktrees_dir.exists(): + for item in self.worktrees_dir.iterdir(): + if item.is_dir(): + info = self.get_worktree_info(item.name) + if info: + worktrees.append(info) + seen_specs.add(item.name) + + # Check legacy location (.worktrees/) + legacy_dir = self.project_dir / ".worktrees" + if legacy_dir.exists(): + for item in legacy_dir.iterdir(): + if item.is_dir() and item.name not in seen_specs: + info = self.get_worktree_info(item.name) + if info: + worktrees.append(info) return worktrees @@ -587,81 +674,187 @@ def get_test_commands(self, spec_name: str) -> list[str]: return commands - # ==================== Backward Compatibility ==================== - # These methods provide backward compatibility with the old single-worktree API + def has_uncommitted_changes(self, spec_name: str | None = None) -> bool: + """Check if there are uncommitted changes.""" + cwd = None + if spec_name: + worktree_path = self.get_worktree_path(spec_name) + if worktree_path.exists(): + cwd = worktree_path + result = self._run_git(["status", "--porcelain"], cwd=cwd) + return bool(result.stdout.strip()) - def get_staging_path(self) -> Path | None: - """ - Backward compatibility: Get path to any existing spec worktree. - Prefer using get_worktree_path(spec_name) instead. - """ - worktrees = self.list_all_worktrees() - if worktrees: - return worktrees[0].path - return None + # ==================== Worktree Cleanup Methods ==================== - def get_staging_info(self) -> WorktreeInfo | None: + def get_old_worktrees( + self, days_threshold: int = 30, include_stats: bool = False + ) -> list[WorktreeInfo] | list[str]: """ - Backward compatibility: Get info about any existing spec worktree. - Prefer using get_worktree_info(spec_name) instead. - """ - worktrees = self.list_all_worktrees() - if worktrees: - return worktrees[0] - return None + Find worktrees that haven't been modified in the specified number of days. - def merge_staging(self, delete_after: bool = True) -> bool: - """ - Backward compatibility: Merge first found worktree. - Prefer using merge_worktree(spec_name) instead. - """ - worktrees = self.list_all_worktrees() - if worktrees: - return self.merge_worktree(worktrees[0].spec_name, delete_after) - return False + Args: + days_threshold: Number of days without activity to consider a worktree old (default: 30) + include_stats: If True, return full WorktreeInfo objects; if False, return just spec names - def remove_staging(self, delete_branch: bool = True) -> None: - """ - Backward compatibility: Remove first found worktree. - Prefer using remove_worktree(spec_name) instead. 
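A hedged sketch of how the cleanup helpers defined here might be driven from calling code; WorktreeManager and the method names are taken from this change, the project path is illustrative.

from pathlib import Path

from core.worktree import WorktreeManager

manager = WorktreeManager(Path("/path/to/project"))

# Report stale worktrees without touching them.
for info in manager.get_old_worktrees(days_threshold=30, include_stats=True):
    print(f"{info.spec_name}: {info.days_since_last_commit} days since last commit")

# Dry run first, then the real cleanup once the list looks right.
manager.cleanup_old_worktrees(days_threshold=30, dry_run=True)
removed, failed = manager.cleanup_old_worktrees(days_threshold=30)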
+ Returns: + List of old worktrees (either WorktreeInfo objects or spec names based on include_stats) """ - worktrees = self.list_all_worktrees() - if worktrees: - self.remove_worktree(worktrees[0].spec_name, delete_branch) + old_worktrees = [] - def get_or_create_staging(self, spec_name: str) -> WorktreeInfo: - """ - Backward compatibility: Alias for get_or_create_worktree. - """ - return self.get_or_create_worktree(spec_name) + for worktree_info in self.list_all_worktrees(): + # Skip if we can't determine age + if worktree_info.days_since_last_commit is None: + continue + + if worktree_info.days_since_last_commit >= days_threshold: + if include_stats: + old_worktrees.append(worktree_info) + else: + old_worktrees.append(worktree_info.spec_name) - def staging_exists(self) -> bool: + return old_worktrees + + def cleanup_old_worktrees( + self, days_threshold: int = 30, dry_run: bool = False + ) -> tuple[list[str], list[str]]: """ - Backward compatibility: Check if any spec worktree exists. - Prefer using worktree_exists(spec_name) instead. + Remove worktrees that haven't been modified in the specified number of days. + + Args: + days_threshold: Number of days without activity to consider a worktree old (default: 30) + dry_run: If True, only report what would be removed without actually removing + + Returns: + Tuple of (removed_specs, failed_specs) containing spec names """ - return len(self.list_all_worktrees()) > 0 + old_worktrees = self.get_old_worktrees( + days_threshold=days_threshold, include_stats=True + ) + + if not old_worktrees: + print(f"No worktrees found older than {days_threshold} days.") + return ([], []) + + removed = [] + failed = [] + + if dry_run: + print(f"\n[DRY RUN] Would remove {len(old_worktrees)} old worktrees:") + for info in old_worktrees: + print( + f" - {info.spec_name} (last activity: {info.days_since_last_commit} days ago)" + ) + return ([], []) + + print(f"\nRemoving {len(old_worktrees)} old worktrees...") + for info in old_worktrees: + try: + self.remove_worktree(info.spec_name, delete_branch=True) + removed.append(info.spec_name) + print( + f" โœ“ Removed {info.spec_name} (last activity: {info.days_since_last_commit} days ago)" + ) + except Exception as e: + failed.append(info.spec_name) + print(f" โœ— Failed to remove {info.spec_name}: {e}") - def commit_in_staging(self, message: str) -> bool: + if removed: + print(f"\nSuccessfully removed {len(removed)} worktree(s).") + if failed: + print(f"Failed to remove {len(failed)} worktree(s).") + + return (removed, failed) + + def get_worktree_count_warning( + self, warning_threshold: int = 10, critical_threshold: int = 20 + ) -> str | None: """ - Backward compatibility: Commit in first found worktree. - Prefer using commit_in_worktree(spec_name, message) instead. + Check worktree count and return a warning message if threshold is exceeded. + + Args: + warning_threshold: Number of worktrees to trigger a warning (default: 10) + critical_threshold: Number of worktrees to trigger a critical warning (default: 20) + + Returns: + Warning message string if threshold exceeded, None otherwise """ worktrees = self.list_all_worktrees() - if worktrees: - return self.commit_in_worktree(worktrees[0].spec_name, message) - return False + count = len(worktrees) + + if count >= critical_threshold: + old_worktrees = self.get_old_worktrees(days_threshold=30) + old_count = len(old_worktrees) + return ( + f"CRITICAL: {count} worktrees detected! " + f"Consider cleaning up old worktrees ({old_count} are 30+ days old). 
" + f"Run cleanup to remove stale worktrees." + ) + elif count >= warning_threshold: + old_worktrees = self.get_old_worktrees(days_threshold=30) + old_count = len(old_worktrees) + return ( + f"WARNING: {count} worktrees detected. " + f"{old_count} are 30+ days old and may be safe to clean up." + ) - def has_uncommitted_changes(self, in_staging: bool = False) -> bool: - """Check if there are uncommitted changes.""" + return None + + def print_worktree_summary(self) -> None: + """Print a summary of all worktrees with age information.""" worktrees = self.list_all_worktrees() - if in_staging and worktrees: - cwd = worktrees[0].path - else: - cwd = None - result = self._run_git(["status", "--porcelain"], cwd=cwd) - return bool(result.stdout.strip()) + if not worktrees: + print("No worktrees found.") + return -# Keep STAGING_WORKTREE_NAME for backward compatibility in imports -STAGING_WORKTREE_NAME = "auto-claude" + print(f"\n{'=' * 80}") + print(f"Worktree Summary ({len(worktrees)} total)") + print(f"{'=' * 80}\n") + + # Group by age + recent = [] # < 7 days + week_old = [] # 7-30 days + month_old = [] # 30-90 days + very_old = [] # > 90 days + unknown_age = [] + + for info in worktrees: + if info.days_since_last_commit is None: + unknown_age.append(info) + elif info.days_since_last_commit < 7: + recent.append(info) + elif info.days_since_last_commit < 30: + week_old.append(info) + elif info.days_since_last_commit < 90: + month_old.append(info) + else: + very_old.append(info) + + def print_group(title: str, items: list[WorktreeInfo]): + if not items: + return + print(f"{title} ({len(items)}):") + for info in sorted(items, key=lambda x: x.spec_name): + age_str = ( + f"{info.days_since_last_commit}d ago" + if info.days_since_last_commit is not None + else "unknown" + ) + print(f" - {info.spec_name} (last activity: {age_str})") + print() + + print_group("Recent (< 7 days)", recent) + print_group("Week Old (7-30 days)", week_old) + print_group("Month Old (30-90 days)", month_old) + print_group("Very Old (> 90 days)", very_old) + print_group("Unknown Age", unknown_age) + + # Print cleanup suggestions + if month_old or very_old: + total_old = len(month_old) + len(very_old) + print(f"{'=' * 80}") + print( + f"๐Ÿ’ก Suggestion: {total_old} worktree(s) are 30+ days old and may be safe to clean up." 
+ ) + print(" Review these worktrees and run cleanup if no longer needed.") + print(f"{'=' * 80}\n") diff --git a/apps/backend/ideation/config.py b/apps/backend/ideation/config.py index 9f650b78da..0f56a893d3 100644 --- a/apps/backend/ideation/config.py +++ b/apps/backend/ideation/config.py @@ -25,7 +25,7 @@ def __init__( include_roadmap_context: bool = True, include_kanban_context: bool = True, max_ideas_per_type: int = 5, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", refresh: bool = False, append: bool = False, diff --git a/apps/backend/ideation/generator.py b/apps/backend/ideation/generator.py index 4e3005040e..dcd347041b 100644 --- a/apps/backend/ideation/generator.py +++ b/apps/backend/ideation/generator.py @@ -17,7 +17,7 @@ sys.path.insert(0, str(Path(__file__).parent.parent)) from client import create_client -from phase_config import get_thinking_budget +from phase_config import get_thinking_budget, resolve_model_id from ui import print_status # Ideation types @@ -56,7 +56,7 @@ def __init__( self, project_dir: Path, output_dir: Path, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", max_ideas_per_type: int = 5, ): @@ -94,7 +94,7 @@ async def run_agent( client = create_client( self.project_dir, self.output_dir, - self.model, + resolve_model_id(self.model), max_thinking_tokens=self.thinking_budget, ) @@ -187,7 +187,7 @@ async def run_recovery_agent( client = create_client( self.project_dir, self.output_dir, - self.model, + resolve_model_id(self.model), max_thinking_tokens=self.thinking_budget, ) diff --git a/apps/backend/ideation/runner.py b/apps/backend/ideation/runner.py index 1e1537037a..c20d41f839 100644 --- a/apps/backend/ideation/runner.py +++ b/apps/backend/ideation/runner.py @@ -41,7 +41,7 @@ def __init__( include_roadmap_context: bool = True, include_kanban_context: bool = True, max_ideas_per_type: int = 5, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", refresh: bool = False, append: bool = False, diff --git a/apps/backend/ideation/types.py b/apps/backend/ideation/types.py index 7180f1e0f0..c2c391d630 100644 --- a/apps/backend/ideation/types.py +++ b/apps/backend/ideation/types.py @@ -31,6 +31,6 @@ class IdeationConfig: include_roadmap_context: bool = True include_kanban_context: bool = True max_ideas_per_type: int = 5 - model: str = "claude-opus-4-5-20251101" + model: str = "sonnet" # Changed from "opus" (fix #433) refresh: bool = False append: bool = False # If True, preserve existing ideas when merging diff --git a/apps/backend/init.py b/apps/backend/init.py index c6aee373d4..5f1962b44e 100644 --- a/apps/backend/init.py +++ b/apps/backend/init.py @@ -6,6 +6,32 @@ from pathlib import Path +# All entries that should be added to .gitignore for auto-claude projects +AUTO_CLAUDE_GITIGNORE_ENTRIES = [ + ".auto-claude/", + ".auto-claude-security.json", + ".auto-claude-status", + ".claude_settings.json", + ".worktrees/", + ".security-key", + "logs/security/", +] + + +def _entry_exists_in_gitignore(lines: list[str], entry: str) -> bool: + """Check if an entry already exists in gitignore (handles trailing slash variations).""" + entry_normalized = entry.rstrip("/") + for line in lines: + line_stripped = line.strip() + # Match both "entry" and "entry/" + if ( + line_stripped == entry + or line_stripped == entry_normalized + or 
line_stripped == entry_normalized + "/" + ): + return True + return False + def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> bool: """ @@ -27,17 +53,8 @@ def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> b content = gitignore_path.read_text() lines = content.splitlines() - # Check if entry already exists (exact match or with trailing newline variations) - entry_normalized = entry.rstrip("/") - for line in lines: - line_stripped = line.strip() - # Match both ".auto-claude" and ".auto-claude/" - if ( - line_stripped == entry - or line_stripped == entry_normalized - or line_stripped == entry_normalized + "/" - ): - return False # Already exists + if _entry_exists_in_gitignore(lines, entry): + return False # Already exists # Entry doesn't exist, append it # Ensure file ends with newline before adding our entry @@ -59,11 +76,58 @@ def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> b return True +def ensure_all_gitignore_entries(project_dir: Path) -> list[str]: + """ + Ensure all auto-claude related entries exist in the project's .gitignore file. + + Creates .gitignore if it doesn't exist. + + Args: + project_dir: The project root directory + + Returns: + List of entries that were added (empty if all already existed) + """ + gitignore_path = project_dir / ".gitignore" + added_entries: list[str] = [] + + # Read existing content or start fresh + if gitignore_path.exists(): + content = gitignore_path.read_text() + lines = content.splitlines() + else: + content = "" + lines = [] + + # Find entries that need to be added + entries_to_add = [ + entry + for entry in AUTO_CLAUDE_GITIGNORE_ENTRIES + if not _entry_exists_in_gitignore(lines, entry) + ] + + if not entries_to_add: + return [] + + # Build the new content to append + # Ensure file ends with newline before adding our entries + if content and not content.endswith("\n"): + content += "\n" + + content += "\n# Auto Claude generated files\n" + for entry in entries_to_add: + content += entry + "\n" + added_entries.append(entry) + + gitignore_path.write_text(content) + return added_entries + + def init_auto_claude_dir(project_dir: Path) -> tuple[Path, bool]: """ Initialize the .auto-claude directory for a project. - Creates the directory if needed and ensures it's in .gitignore. + Creates the directory if needed and ensures all auto-claude files are in .gitignore. 
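A short sketch of the expected return value of ensure_all_gitignore_entries when some entries are already present; the temporary project directory is illustrative.

import tempfile
from pathlib import Path

from init import AUTO_CLAUDE_GITIGNORE_ENTRIES, ensure_all_gitignore_entries

with tempfile.TemporaryDirectory() as tmp:
    project = Path(tmp)
    (project / ".gitignore").write_text(".auto-claude/\n", encoding="utf-8")
    added = ensure_all_gitignore_entries(project)
    # Only the entries that were missing come back; ".auto-claude/" is skipped.
    assert ".auto-claude/" not in added
    assert set(added) <= set(AUTO_CLAUDE_GITIGNORE_ENTRIES)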
Args: project_dir: The project root directory @@ -78,16 +142,18 @@ def init_auto_claude_dir(project_dir: Path) -> tuple[Path, bool]: dir_created = not auto_claude_dir.exists() auto_claude_dir.mkdir(parents=True, exist_ok=True) - # Ensure .auto-claude is in .gitignore (only on first creation) + # Ensure all auto-claude entries are in .gitignore (only on first creation) gitignore_updated = False if dir_created: - gitignore_updated = ensure_gitignore_entry(project_dir, ".auto-claude/") + added = ensure_all_gitignore_entries(project_dir) + gitignore_updated = len(added) > 0 else: # Even if dir exists, check gitignore on first run # Use a marker file to track if we've already checked marker = auto_claude_dir / ".gitignore_checked" if not marker.exists(): - gitignore_updated = ensure_gitignore_entry(project_dir, ".auto-claude/") + added = ensure_all_gitignore_entries(project_dir) + gitignore_updated = len(added) > 0 marker.touch() return auto_claude_dir, gitignore_updated @@ -109,3 +175,36 @@ def get_auto_claude_dir(project_dir: Path, ensure_exists: bool = True) -> Path: return auto_claude_dir return Path(project_dir) / ".auto-claude" + + +def repair_gitignore(project_dir: Path) -> list[str]: + """ + Repair an existing project's .gitignore to include all auto-claude entries. + + This is useful for projects created before all entries were being added, + or when gitignore entries were manually removed. + + Also resets the .gitignore_checked marker to allow future updates. + + Args: + project_dir: The project root directory + + Returns: + List of entries that were added (empty if all already existed) + """ + project_dir = Path(project_dir) + auto_claude_dir = project_dir / ".auto-claude" + + # Remove the marker file so future checks will also run + marker = auto_claude_dir / ".gitignore_checked" + if marker.exists(): + marker.unlink() + + # Add all missing entries + added = ensure_all_gitignore_entries(project_dir) + + # Re-create the marker + if auto_claude_dir.exists(): + marker.touch() + + return added diff --git a/apps/backend/integrations/graphiti/config.py b/apps/backend/integrations/graphiti/config.py index f2af6fd32f..4dbbc3e61e 100644 --- a/apps/backend/integrations/graphiti/config.py +++ b/apps/backend/integrations/graphiti/config.py @@ -622,10 +622,23 @@ def get_graphiti_status() -> dict: status["errors"] = errors # Errors are informational - embedder is optional (keyword search fallback) - # Available if is_valid() returns True (just needs enabled flag) - status["available"] = config.is_valid() - if not status["available"]: + # CRITICAL FIX: Actually verify packages are importable before reporting available + # Don't just check config.is_valid() - actually try to import the module + if not config.is_valid(): status["reason"] = errors[0] if errors else "Configuration invalid" + return status + + # Try importing the required Graphiti packages + try: + # Attempt to import the main graphiti_memory module + import graphiti_core # noqa: F401 + from graphiti_core.driver.falkordb_driver import FalkorDriver # noqa: F401 + + # If we got here, packages are importable + status["available"] = True + except ImportError as e: + status["available"] = False + status["reason"] = f"Graphiti packages not installed: {e}" return status diff --git a/apps/backend/integrations/graphiti/queries_pkg/client.py b/apps/backend/integrations/graphiti/queries_pkg/client.py index c1961484ac..3808d9d561 100644 --- a/apps/backend/integrations/graphiti/queries_pkg/client.py +++ 
b/apps/backend/integrations/graphiti/queries_pkg/client.py @@ -34,8 +34,25 @@ def _apply_ladybug_monkeypatch() -> bool: sys.modules["kuzu"] = real_ladybug logger.info("Applied LadybugDB monkeypatch (kuzu -> real_ladybug)") return True - except ImportError: - pass + except ImportError as e: + logger.debug(f"LadybugDB import failed: {e}") + # On Windows with Python 3.12+, provide more specific error details + # (pywin32 is only required for Python 3.12+ per requirements.txt) + if sys.platform == "win32" and sys.version_info >= (3, 12): + # Check if it's the pywin32 error using both name attribute and string match + # for robustness across Python versions + is_pywin32_error = ( + (hasattr(e, "name") and e.name in ("pywintypes", "pywin32", "win32api")) + or "pywintypes" in str(e) + or "pywin32" in str(e) + ) + if is_pywin32_error: + logger.error( + "LadybugDB requires pywin32 on Windows. " + "Install with: pip install pywin32>=306" + ) + else: + logger.debug(f"Windows-specific import issue: {e}") # Fall back to native kuzu try: diff --git a/apps/backend/integrations/linear/updater.py b/apps/backend/integrations/linear/updater.py index d102642fab..02d3880cfc 100644 --- a/apps/backend/integrations/linear/updater.py +++ b/apps/backend/integrations/linear/updater.py @@ -118,6 +118,7 @@ def _create_linear_client() -> ClaudeSDKClient: get_sdk_env_vars, require_auth_token, ) + from phase_config import resolve_model_id require_auth_token() # Raises ValueError if no token found ensure_claude_code_oauth_token() @@ -130,7 +131,7 @@ def _create_linear_client() -> ClaudeSDKClient: return ClaudeSDKClient( options=ClaudeAgentOptions( - model="claude-haiku-4-5", # Fast & cheap model for simple API calls + model=resolve_model_id("haiku"), # Resolves via API Profile if configured system_prompt="You are a Linear API assistant. Execute the requested Linear operation precisely.", allowed_tools=LINEAR_TOOLS, mcp_servers={ diff --git a/apps/backend/merge/__init__.py b/apps/backend/merge/__init__.py index 99dc35d269..7ac715a964 100644 --- a/apps/backend/merge/__init__.py +++ b/apps/backend/merge/__init__.py @@ -9,7 +9,7 @@ traditional merge conflicts. 
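A minimal reproduction of the ImportError classification used above, standard library only; the simulated exceptions stand in for a failing real_ladybug import.

def looks_like_missing_pywin32(exc: ImportError) -> bool:
    # Mirrors the check above: prefer the structured .name attribute,
    # fall back to string matching for robustness across Python versions.
    return (
        getattr(exc, "name", None) in ("pywintypes", "pywin32", "win32api")
        or "pywintypes" in str(exc)
        or "pywin32" in str(exc)
    )

assert looks_like_missing_pywin32(ImportError("No module named 'pywintypes'", name="pywintypes"))
assert not looks_like_missing_pywin32(ImportError("No module named 'numpy'", name="numpy"))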
Components: -- SemanticAnalyzer: Tree-sitter based semantic change extraction +- SemanticAnalyzer: Regex-based semantic change extraction - ConflictDetector: Rule-based conflict detection and compatibility analysis - AutoMerger: Deterministic merge strategies (no AI needed) - AIResolver: Minimal-context AI resolution for ambiguous conflicts diff --git a/apps/backend/merge/ai_resolver/claude_client.py b/apps/backend/merge/ai_resolver/claude_client.py index 77229043c5..40e118f923 100644 --- a/apps/backend/merge/ai_resolver/claude_client.py +++ b/apps/backend/merge/ai_resolver/claude_client.py @@ -82,7 +82,9 @@ async def _run_merge() -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text logger.info(f"AI merge response: {len(response_text)} chars") diff --git a/apps/backend/merge/file_evolution/modification_tracker.py b/apps/backend/merge/file_evolution/modification_tracker.py index b4cc281ae6..6d75237eb7 100644 --- a/apps/backend/merge/file_evolution/modification_tracker.py +++ b/apps/backend/merge/file_evolution/modification_tracker.py @@ -68,6 +68,7 @@ def record_modification( new_content: str, evolutions: dict[str, FileEvolution], raw_diff: str | None = None, + skip_semantic_analysis: bool = False, ) -> TaskSnapshot | None: """ Record a file modification by a task. @@ -79,6 +80,9 @@ def record_modification( new_content: File content after modification evolutions: Current evolution data (will be updated) raw_diff: Optional unified diff for reference + skip_semantic_analysis: If True, skip expensive semantic analysis. + Use this for lightweight file tracking when only conflict + detection is needed (not conflict resolution). 
Returns: Updated TaskSnapshot, or None if file not being tracked @@ -87,8 +91,8 @@ def record_modification( # Get or create evolution if rel_path not in evolutions: - logger.warning(f"File {rel_path} not being tracked") - # Note: We could auto-create here, but for now return None + # Debug level: this is expected for files not in baseline (e.g., from main's changes) + logger.debug(f"File {rel_path} not in evolution tracking - skipping") return None evolution = evolutions.get(rel_path) @@ -105,9 +109,19 @@ def record_modification( content_hash_before=compute_content_hash(old_content), ) - # Analyze semantic changes - analysis = self.analyzer.analyze_diff(rel_path, old_content, new_content) - semantic_changes = analysis.changes + # Analyze semantic changes (or skip for lightweight tracking) + if skip_semantic_analysis: + # Fast path: just track the file change without analysis + # This is used for files that don't have conflicts + semantic_changes = [] + debug( + MODULE, + f"Skipping semantic analysis for {rel_path} (lightweight tracking)", + ) + else: + # Full analysis (only for conflict files) + analysis = self.analyzer.analyze_diff(rel_path, old_content, new_content) + semantic_changes = analysis.changes # Update snapshot snapshot.completed_at = datetime.now() @@ -121,6 +135,7 @@ def record_modification( logger.info( f"Recorded modification to {rel_path} by {task_id}: " f"{len(semantic_changes)} semantic changes" + + (" (lightweight)" if skip_semantic_analysis else "") ) return snapshot @@ -130,6 +145,7 @@ def refresh_from_git( worktree_path: Path, evolutions: dict[str, FileEvolution], target_branch: str | None = None, + analyze_only_files: set[str] | None = None, ) -> None: """ Refresh task snapshots by analyzing git diff from worktree. @@ -142,6 +158,10 @@ def refresh_from_git( worktree_path: Path to the task's worktree evolutions: Current evolution data (will be updated) target_branch: Branch to compare against (default: detect from worktree) + analyze_only_files: If provided, only run full semantic analysis on + these files. Other files will be tracked with lightweight mode + (no semantic analysis). This optimizes performance by only + analyzing files that have actual conflicts. 
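A hedged sketch of the merge-base approach described above using plain subprocess calls; the worktree path and branch name are illustrative, and error handling is reduced to check=True.

import subprocess
from pathlib import Path


def task_only_changes(worktree: Path, target_branch: str = "main") -> list[str]:
    # Two-dot diff from the merge-base returns only files the task changed,
    # not files that moved on the target branch since the task diverged.
    merge_base = subprocess.run(
        ["git", "merge-base", target_branch, "HEAD"],
        cwd=worktree, capture_output=True, text=True, check=True,
    ).stdout.strip()
    result = subprocess.run(
        ["git", "diff", "--name-only", f"{merge_base}..HEAD"],
        cwd=worktree, capture_output=True, text=True, check=True,
    )
    return [f for f in result.stdout.splitlines() if f.strip()]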
""" # Determine the target branch to compare against if not target_branch: @@ -154,12 +174,27 @@ def refresh_from_git( task_id=task_id, worktree_path=str(worktree_path), target_branch=target_branch, + analyze_only_files=list(analyze_only_files)[:10] + if analyze_only_files + else "all", ) try: - # Get list of files changed in the worktree vs target branch + # Get the merge-base to accurately identify task-only changes + # Using two-dot diff (merge-base..HEAD) returns only files changed by the task, + # not files changed on the target branch since divergence + merge_base_result = subprocess.run( + ["git", "merge-base", target_branch, "HEAD"], + cwd=worktree_path, + capture_output=True, + text=True, + check=True, + ) + merge_base = merge_base_result.stdout.strip() + + # Get list of files changed in the worktree since the merge-base result = subprocess.run( - ["git", "diff", "--name-only", f"{target_branch}...HEAD"], + ["git", "diff", "--name-only", f"{merge_base}..HEAD"], cwd=worktree_path, capture_output=True, text=True, @@ -175,55 +210,103 @@ def refresh_from_git( else changed_files, ) + processed_count = 0 for file_path in changed_files: - # Get the diff for this file - diff_result = subprocess.run( - ["git", "diff", f"{target_branch}...HEAD", "--", file_path], - cwd=worktree_path, - capture_output=True, - text=True, - check=True, - ) - - # Get content before (from target branch) and after (current) try: - show_result = subprocess.run( - ["git", "show", f"{target_branch}:{file_path}"], + # Get the diff for this file (using merge-base for accurate task-only diff) + diff_result = subprocess.run( + ["git", "diff", f"{merge_base}..HEAD", "--", file_path], cwd=worktree_path, capture_output=True, text=True, check=True, ) - old_content = show_result.stdout - except subprocess.CalledProcessError: - # File is new - old_content = "" - current_file = worktree_path / file_path - if current_file.exists(): + # Get content before (from merge-base - the point where task branched) try: - new_content = current_file.read_text(encoding="utf-8") - except UnicodeDecodeError: - new_content = current_file.read_text( - encoding="utf-8", errors="replace" + show_result = subprocess.run( + ["git", "show", f"{merge_base}:{file_path}"], + cwd=worktree_path, + capture_output=True, + text=True, + check=True, + ) + old_content = show_result.stdout + except subprocess.CalledProcessError: + # File is new + old_content = "" + + current_file = worktree_path / file_path + if current_file.exists(): + try: + new_content = current_file.read_text(encoding="utf-8") + except UnicodeDecodeError: + new_content = current_file.read_text( + encoding="utf-8", errors="replace" + ) + else: + # File was deleted + new_content = "" + + # Auto-create FileEvolution entry if not already tracked + # This handles retroactive tracking when capture_baselines wasn't called + rel_path = self.storage.get_relative_path(file_path) + if rel_path not in evolutions: + evolutions[rel_path] = FileEvolution( + file_path=rel_path, + baseline_commit=merge_base, + baseline_captured_at=datetime.now(), + baseline_content_hash=compute_content_hash(old_content), + baseline_snapshot_path="", # Not storing baseline file + task_snapshots=[], + ) + debug( + MODULE, + f"Auto-created evolution entry for {rel_path}", + baseline_commit=merge_base[:8], ) - else: - # File was deleted - new_content = "" - - # Record the modification - self.record_modification( - task_id=task_id, - file_path=file_path, - old_content=old_content, - new_content=new_content, - 
evolutions=evolutions, - raw_diff=diff_result.stdout, - ) - logger.info( - f"Refreshed {len(changed_files)} files from worktree for task {task_id}" - ) + # Determine if this file needs full semantic analysis + # If analyze_only_files is provided, only analyze files in that set + # Otherwise, analyze all files (backward compatible) + skip_analysis = False + if analyze_only_files is not None: + skip_analysis = rel_path not in analyze_only_files + + # Record the modification + self.record_modification( + task_id=task_id, + file_path=file_path, + old_content=old_content, + new_content=new_content, + evolutions=evolutions, + raw_diff=diff_result.stdout, + skip_semantic_analysis=skip_analysis, + ) + processed_count += 1 + + except subprocess.CalledProcessError as e: + # Log error but continue with remaining files + logger.warning( + f"Failed to process {file_path} in refresh_from_git: {e}" + ) + continue + + # Calculate how many files were fully analyzed vs just tracked + if analyze_only_files is not None: + analyzed_count = len( + [f for f in changed_files if f in analyze_only_files] + ) + tracked_only_count = processed_count - analyzed_count + logger.info( + f"Refreshed {processed_count}/{len(changed_files)} files from worktree for task {task_id} " + f"(analyzed: {analyzed_count}, tracked only: {tracked_only_count})" + ) + else: + logger.info( + f"Refreshed {processed_count}/{len(changed_files)} files from worktree for task {task_id} " + "(full analysis on all files)" + ) except subprocess.CalledProcessError as e: logger.error(f"Failed to refresh from git: {e}") @@ -248,35 +331,23 @@ def mark_task_completed( def _detect_target_branch(self, worktree_path: Path) -> str: """ - Detect the target branch to compare against for a worktree. + Detect the base branch to compare against for a worktree. + + This finds the branch that the worktree was created FROM by looking + for common branch names (main, master, develop) that have a valid + merge-base with the worktree. - This finds the branch that the worktree was created from by looking - at the merge-base between the worktree and common branch names. + Note: We don't use upstream tracking because that returns the worktree's + own branch (e.g., origin/auto-claude/...) rather than the base branch. 
Args: worktree_path: Path to the worktree Returns: - The detected target branch name, defaults to 'main' if detection fails + The detected base branch name, defaults to 'main' if detection fails """ - # Try to get the upstream tracking branch - try: - result = subprocess.run( - ["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"], - cwd=worktree_path, - capture_output=True, - text=True, - ) - if result.returncode == 0 and result.stdout.strip(): - upstream = result.stdout.strip() - # Extract branch name from origin/branch format - if "/" in upstream: - return upstream.split("/", 1)[1] - return upstream - except subprocess.CalledProcessError: - pass - # Try common branch names and find which one has a valid merge-base + # This is the reliable way to find what branch the worktree diverged from for branch in ["main", "master", "develop"]: try: result = subprocess.run( @@ -286,14 +357,39 @@ def _detect_target_branch(self, worktree_path: Path) -> str: text=True, ) if result.returncode == 0: + debug( + MODULE, + f"Detected base branch: {branch}", + worktree_path=str(worktree_path), + ) return branch except subprocess.CalledProcessError: continue - # Default to main + # Before defaulting to 'main', verify it exists + # This handles non-standard projects that use trunk, production, etc. + try: + result = subprocess.run( + ["git", "rev-parse", "--verify", "main"], + cwd=worktree_path, + capture_output=True, + text=True, + ) + if result.returncode == 0: + debug_warning( + MODULE, + "Could not find merge-base with standard branches, defaulting to 'main'", + worktree_path=str(worktree_path), + ) + return "main" + except subprocess.CalledProcessError: + pass + + # Last resort: use HEAD~10 as a fallback comparison point + # This allows modification tracking even on non-standard branch setups debug_warning( MODULE, - "Could not detect target branch, defaulting to 'main'", + "No standard base branch found, modification tracking may be limited", worktree_path=str(worktree_path), ) - return "main" + return "HEAD~10" diff --git a/apps/backend/merge/file_evolution/tracker.py b/apps/backend/merge/file_evolution/tracker.py index c9df3b1a68..2a8d248eb4 100644 --- a/apps/backend/merge/file_evolution/tracker.py +++ b/apps/backend/merge/file_evolution/tracker.py @@ -327,6 +327,7 @@ def refresh_from_git( task_id: str, worktree_path: Path, target_branch: str | None = None, + analyze_only_files: set[str] | None = None, ) -> None: """ Refresh task snapshots by analyzing git diff from worktree. @@ -338,11 +339,16 @@ def refresh_from_git( task_id: The task identifier worktree_path: Path to the task's worktree target_branch: Branch to compare against (default: auto-detect) + analyze_only_files: If provided, only run full semantic analysis on + these files. Other files will be tracked with lightweight mode + (no semantic analysis). This optimizes performance by only + analyzing files that have actual conflicts. 
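A tiny sketch of the skip decision that analyze_only_files drives, mirroring the behaviour described above; file paths are illustrative.

def needs_full_analysis(rel_path: str, analyze_only_files: set[str] | None) -> bool:
    # None means "analyze everything" (backward compatible); otherwise only
    # files in the conflict set get full semantic analysis.
    return analyze_only_files is None or rel_path in analyze_only_files

assert needs_full_analysis("src/app.py", None)
assert needs_full_analysis("src/app.py", {"src/app.py"})
assert not needs_full_analysis("README.md", {"src/app.py"})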
""" self.modification_tracker.refresh_from_git( task_id=task_id, worktree_path=worktree_path, evolutions=self._evolutions, target_branch=target_branch, + analyze_only_files=analyze_only_files, ) self._save_evolutions() diff --git a/apps/backend/merge/file_merger.py b/apps/backend/merge/file_merger.py index 1038055554..7fc3c35dc7 100644 --- a/apps/backend/merge/file_merger.py +++ b/apps/backend/merge/file_merger.py @@ -19,6 +19,35 @@ from .types import ChangeType, SemanticChange, TaskSnapshot +def detect_line_ending(content: str) -> str: + """ + Detect line ending style in content using priority-based detection. + + Uses a priority order (CRLF > CR > LF) to detect the line ending style. + CRLF is checked first because it contains LF, so presence of any CRLF + indicates Windows-style endings. This approach is fast and works well + for files that consistently use one style. + + Note: This returns the first detected style by priority, not the most + frequent style. For files with mixed line endings, consider normalizing + to a single style before processing. + + Args: + content: File content to analyze + + Returns: + The detected line ending string: "\\r\\n", "\\r", or "\\n" + """ + # Check for CRLF first (Windows) - must check before LF since CRLF contains LF + if "\r\n" in content: + return "\r\n" + # Check for CR (classic Mac, rare but possible) + if "\r" in content: + return "\r" + # Default to LF (Unix/modern Mac) + return "\n" + + def apply_single_task_changes( baseline: str, snapshot: TaskSnapshot, @@ -35,7 +64,16 @@ def apply_single_task_changes( Returns: Modified content with changes applied """ - content = baseline + # Detect line ending style before normalizing + original_line_ending = detect_line_ending(baseline) + + # Normalize to LF for consistent matching with regex_analyzer output + # The regex_analyzer normalizes content to LF when extracting content_before/after, + # so we must also normalize baseline to ensure replace() matches correctly + content = baseline.replace("\r\n", "\n").replace("\r", "\n") + + # Use LF for internal processing + line_ending = "\n" for change in snapshot.semantic_changes: if change.content_before and change.content_after: @@ -45,13 +83,19 @@ def apply_single_task_changes( # Addition - need to determine where to add if change.change_type == ChangeType.ADD_IMPORT: # Add import at top - lines = content.split("\n") + lines = content.splitlines() import_end = find_import_end(lines, file_path) lines.insert(import_end, change.content_after) - content = "\n".join(lines) + content = line_ending.join(lines) elif change.change_type == ChangeType.ADD_FUNCTION: # Add function at end (before exports) - content += f"\n\n{change.content_after}" + content += f"{line_ending}{line_ending}{change.content_after}" + + # Restore original line ending style if it was CRLF + if original_line_ending == "\r\n": + content = content.replace("\n", "\r\n") + elif original_line_ending == "\r": + content = content.replace("\n", "\r") return content @@ -72,7 +116,16 @@ def combine_non_conflicting_changes( Returns: Combined content with all changes applied """ - content = baseline + # Detect line ending style before normalizing + original_line_ending = detect_line_ending(baseline) + + # Normalize to LF for consistent matching with regex_analyzer output + # The regex_analyzer normalizes content to LF when extracting content_before/after, + # so we must also normalize baseline to ensure replace() matches correctly + content = baseline.replace("\r\n", "\n").replace("\r", "\n") + + # Use 
LF for internal processing + line_ending = "\n" # Group changes by type for proper ordering imports: list[SemanticChange] = [] @@ -96,13 +149,13 @@ def combine_non_conflicting_changes( # Add imports if imports: - lines = content.split("\n") + lines = content.splitlines() import_end = find_import_end(lines, file_path) for imp in imports: if imp.content_after and imp.content_after not in content: lines.insert(import_end, imp.content_after) import_end += 1 - content = "\n".join(lines) + content = line_ending.join(lines) # Apply modifications for mod in modifications: @@ -112,15 +165,21 @@ def combine_non_conflicting_changes( # Add functions for func in functions: if func.content_after: - content += f"\n\n{func.content_after}" + content += f"{line_ending}{line_ending}{func.content_after}" # Apply other changes for change in other: if change.content_after and not change.content_before: - content += f"\n{change.content_after}" + content += f"{line_ending}{change.content_after}" elif change.content_before and change.content_after: content = content.replace(change.content_before, change.content_after) + # Restore original line ending style if it was CRLF + if original_line_ending == "\r\n": + content = content.replace("\n", "\r\n") + elif original_line_ending == "\r": + content = content.replace("\n", "\r") + return content diff --git a/apps/backend/merge/git_utils.py b/apps/backend/merge/git_utils.py index 92bfd40f7b..6868d0d015 100644 --- a/apps/backend/merge/git_utils.py +++ b/apps/backend/merge/git_utils.py @@ -27,28 +27,19 @@ def find_worktree(project_dir: Path, task_id: str) -> Path | None: Returns: Path to the worktree, or None if not found """ - # Check common locations - worktrees_dir = project_dir / ".worktrees" - if worktrees_dir.exists(): - # Look for worktree with task_id in name - for entry in worktrees_dir.iterdir(): + # Check new path first + new_worktrees_dir = project_dir / ".auto-claude" / "worktrees" / "tasks" + if new_worktrees_dir.exists(): + for entry in new_worktrees_dir.iterdir(): if entry.is_dir() and task_id in entry.name: return entry - # Try git worktree list - try: - result = subprocess.run( - ["git", "worktree", "list", "--porcelain"], - cwd=project_dir, - capture_output=True, - text=True, - check=True, - ) - for line in result.stdout.split("\n"): - if line.startswith("worktree ") and task_id in line: - return Path(line.split(" ", 1)[1]) - except subprocess.CalledProcessError: - pass + # Legacy fallback for backwards compatibility + legacy_worktrees_dir = project_dir / ".worktrees" + if legacy_worktrees_dir.exists(): + for entry in legacy_worktrees_dir.iterdir(): + if entry.is_dir() and task_id in entry.name: + return entry return None diff --git a/apps/backend/merge/semantic_analysis/__init__.py b/apps/backend/merge/semantic_analysis/__init__.py index e06d039969..0f4cc099c4 100644 --- a/apps/backend/merge/semantic_analysis/__init__.py +++ b/apps/backend/merge/semantic_analysis/__init__.py @@ -1,12 +1,10 @@ """ -Semantic analyzer package for AST-based code analysis. +Semantic analyzer package for code analysis. 
This package provides modular semantic analysis capabilities: - models.py: Data structures for extracted elements -- python_analyzer.py: Python-specific AST extraction -- js_analyzer.py: JavaScript/TypeScript-specific AST extraction - comparison.py: Element comparison and change classification -- regex_analyzer.py: Fallback regex-based analysis +- regex_analyzer.py: Regex-based analysis for code changes """ from .models import ExtractedElement diff --git a/apps/backend/merge/semantic_analysis/js_analyzer.py b/apps/backend/merge/semantic_analysis/js_analyzer.py deleted file mode 100644 index 048d03acba..0000000000 --- a/apps/backend/merge/semantic_analysis/js_analyzer.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -JavaScript/TypeScript-specific semantic analysis using tree-sitter. -""" - -from __future__ import annotations - -from collections.abc import Callable - -from .models import ExtractedElement - -try: - from tree_sitter import Node -except ImportError: - Node = None - - -def extract_js_elements( - node: Node, - elements: dict[str, ExtractedElement], - get_text: Callable[[Node], str], - get_line: Callable[[int], int], - ext: str, - parent: str | None = None, -) -> None: - """ - Extract structural elements from JavaScript/TypeScript AST. - - Args: - node: The tree-sitter node to extract from - elements: Dictionary to populate with extracted elements - get_text: Function to extract text from a node - get_line: Function to convert byte position to line number - ext: File extension (.js, .jsx, .ts, .tsx) - parent: Parent element name for nested elements - """ - for child in node.children: - if child.type == "import_statement": - text = get_text(child) - # Try to extract the source module - source_node = child.child_by_field_name("source") - if source_node: - source = get_text(source_node).strip("'\"") - elements[f"import:{source}"] = ExtractedElement( - element_type="import", - name=source, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=text, - ) - - elif child.type in {"function_declaration", "function"}: - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - full_name = f"{parent}.{name}" if parent else name - elements[f"function:{full_name}"] = ExtractedElement( - element_type="function", - name=full_name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - parent=parent, - ) - - elif child.type == "arrow_function": - # Arrow functions are usually assigned to variables - # We'll catch these via variable declarations - pass - - elif child.type in {"lexical_declaration", "variable_declaration"}: - # const/let/var declarations - for declarator in child.children: - if declarator.type == "variable_declarator": - name_node = declarator.child_by_field_name("name") - value_node = declarator.child_by_field_name("value") - if name_node: - name = get_text(name_node) - content = get_text(child) - - # Check if it's a function (arrow function or function expression) - is_function = False - if value_node and value_node.type in { - "arrow_function", - "function", - }: - is_function = True - elements[f"function:{name}"] = ExtractedElement( - element_type="function", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=content, - parent=parent, - ) - else: - elements[f"variable:{name}"] = ExtractedElement( - element_type="variable", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - 
content=content, - parent=parent, - ) - - elif child.type == "class_declaration": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - elements[f"class:{name}"] = ExtractedElement( - element_type="class", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - ) - # Recurse into class body - body = child.child_by_field_name("body") - if body: - extract_js_elements( - body, elements, get_text, get_line, ext, parent=name - ) - - elif child.type == "method_definition": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - full_name = f"{parent}.{name}" if parent else name - elements[f"method:{full_name}"] = ExtractedElement( - element_type="method", - name=full_name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - parent=parent, - ) - - elif child.type == "export_statement": - # Recurse into exports to find the actual declaration - extract_js_elements(child, elements, get_text, get_line, ext, parent) - - # TypeScript specific - elif child.type in {"interface_declaration", "type_alias_declaration"}: - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - elem_type = "interface" if "interface" in child.type else "type" - elements[f"{elem_type}:{name}"] = ExtractedElement( - element_type=elem_type, - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - ) - - # Recurse into statement blocks - elif child.type in {"program", "statement_block", "class_body"}: - extract_js_elements(child, elements, get_text, get_line, ext, parent) diff --git a/apps/backend/merge/semantic_analysis/python_analyzer.py b/apps/backend/merge/semantic_analysis/python_analyzer.py deleted file mode 100644 index def71a943b..0000000000 --- a/apps/backend/merge/semantic_analysis/python_analyzer.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Python-specific semantic analysis using tree-sitter. -""" - -from __future__ import annotations - -from collections.abc import Callable - -from .models import ExtractedElement - -try: - from tree_sitter import Node -except ImportError: - Node = None - - -def extract_python_elements( - node: Node, - elements: dict[str, ExtractedElement], - get_text: Callable[[Node], str], - get_line: Callable[[int], int], - parent: str | None = None, -) -> None: - """ - Extract structural elements from Python AST. 
- - Args: - node: The tree-sitter node to extract from - elements: Dictionary to populate with extracted elements - get_text: Function to extract text from a node - get_line: Function to convert byte position to line number - parent: Parent element name for nested elements - """ - for child in node.children: - if child.type == "import_statement": - # import x, y - text = get_text(child) - # Extract module names - for name_node in child.children: - if name_node.type == "dotted_name": - name = get_text(name_node) - elements[f"import:{name}"] = ExtractedElement( - element_type="import", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=text, - ) - - elif child.type == "import_from_statement": - # from x import y, z - text = get_text(child) - module = None - for sub in child.children: - if sub.type == "dotted_name": - module = get_text(sub) - break - if module: - elements[f"import_from:{module}"] = ExtractedElement( - element_type="import_from", - name=module, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=text, - ) - - elif child.type == "function_definition": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - full_name = f"{parent}.{name}" if parent else name - elements[f"function:{full_name}"] = ExtractedElement( - element_type="function", - name=full_name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - parent=parent, - ) - - elif child.type == "class_definition": - name_node = child.child_by_field_name("name") - if name_node: - name = get_text(name_node) - elements[f"class:{name}"] = ExtractedElement( - element_type="class", - name=name, - start_line=get_line(child.start_byte), - end_line=get_line(child.end_byte), - content=get_text(child), - ) - # Recurse into class body for methods - body = child.child_by_field_name("body") - if body: - extract_python_elements( - body, elements, get_text, get_line, parent=name - ) - - elif child.type == "decorated_definition": - # Handle decorated functions/classes - for sub in child.children: - if sub.type in {"function_definition", "class_definition"}: - extract_python_elements(child, elements, get_text, get_line, parent) - break - - # Recurse for other compound statements - elif child.type in { - "if_statement", - "while_statement", - "for_statement", - "try_statement", - "with_statement", - }: - extract_python_elements(child, elements, get_text, get_line, parent) diff --git a/apps/backend/merge/semantic_analysis/regex_analyzer.py b/apps/backend/merge/semantic_analysis/regex_analyzer.py index 40556f765c..9ceff32bee 100644 --- a/apps/backend/merge/semantic_analysis/regex_analyzer.py +++ b/apps/backend/merge/semantic_analysis/regex_analyzer.py @@ -1,5 +1,5 @@ """ -Regex-based fallback analysis when tree-sitter is not available. +Regex-based semantic analysis for code changes. """ from __future__ import annotations @@ -17,7 +17,7 @@ def analyze_with_regex( ext: str, ) -> FileAnalysis: """ - Fallback analysis using regex when tree-sitter isn't available. + Analyze code changes using regex patterns. 
Args: file_path: Path to the file being analyzed @@ -30,11 +30,16 @@ def analyze_with_regex( """ changes: list[SemanticChange] = [] + # Normalize line endings to LF for consistent cross-platform behavior + # This handles Windows CRLF, old Mac CR, and Unix LF + before_normalized = before.replace("\r\n", "\n").replace("\r", "\n") + after_normalized = after.replace("\r\n", "\n").replace("\r", "\n") + # Get a unified diff diff = list( difflib.unified_diff( - before.splitlines(keepends=True), - after.splitlines(keepends=True), + before_normalized.splitlines(keepends=True), + after_normalized.splitlines(keepends=True), lineterm="", ) ) @@ -89,8 +94,22 @@ def analyze_with_regex( # Detect function changes (simplified) func_pattern = get_function_pattern(ext) if func_pattern: - funcs_before = set(func_pattern.findall(before)) - funcs_after = set(func_pattern.findall(after)) + # For JS/TS patterns with alternation, findall() returns tuples + # Extract the non-empty match from each tuple + def extract_func_names(matches): + names = set() + for match in matches: + if isinstance(match, tuple): + # Get the first non-empty group from the tuple + name = next((m for m in match if m), None) + if name: + names.add(name) + elif match: + names.add(match) + return names + + funcs_before = extract_func_names(func_pattern.findall(before_normalized)) + funcs_after = extract_func_names(func_pattern.findall(after_normalized)) for func in funcs_after - funcs_before: changes.append( diff --git a/apps/backend/merge/semantic_analyzer.py b/apps/backend/merge/semantic_analyzer.py index 07aea59056..30697c1a94 100644 --- a/apps/backend/merge/semantic_analyzer.py +++ b/apps/backend/merge/semantic_analyzer.py @@ -2,32 +2,27 @@ Semantic Analyzer ================= -Analyzes code changes at a semantic level using tree-sitter. +Analyzes code changes at a semantic level using regex-based heuristics. -This module provides AST-based analysis of code changes, extracting -meaningful semantic changes like "added import", "modified function", -"wrapped JSX element" rather than line-level diffs. - -When tree-sitter is not available, falls back to regex-based heuristics. +This module provides analysis of code changes, extracting meaningful +semantic changes like "added import", "modified function", "wrapped JSX element" +rather than line-level diffs. 
""" from __future__ import annotations import logging from pathlib import Path -from typing import Any -from .types import ChangeType, FileAnalysis +from .types import FileAnalysis # Import debug utilities try: from debug import ( debug, debug_detailed, - debug_error, debug_success, debug_verbose, - is_debug_enabled, ) except ImportError: # Fallback if debug module not available @@ -43,71 +38,18 @@ def debug_verbose(*args, **kwargs): def debug_success(*args, **kwargs): pass - def debug_error(*args, **kwargs): - pass - - def is_debug_enabled(): - return False - logger = logging.getLogger(__name__) MODULE = "merge.semantic_analyzer" -# Try to import tree-sitter - it's optional but recommended -TREE_SITTER_AVAILABLE = False -try: - import tree_sitter # noqa: F401 - from tree_sitter import Language, Node, Parser, Tree - - TREE_SITTER_AVAILABLE = True - logger.info("tree-sitter available, using AST-based analysis") -except ImportError: - logger.warning("tree-sitter not available, using regex-based fallback") - Tree = None - Node = None - -# Try to import language bindings -LANGUAGES_AVAILABLE: dict[str, Any] = {} -if TREE_SITTER_AVAILABLE: - try: - import tree_sitter_python as tspython - - LANGUAGES_AVAILABLE[".py"] = tspython.language() - except ImportError: - pass - - try: - import tree_sitter_javascript as tsjs - - LANGUAGES_AVAILABLE[".js"] = tsjs.language() - LANGUAGES_AVAILABLE[".jsx"] = tsjs.language() - except ImportError: - pass - - try: - import tree_sitter_typescript as tsts - - LANGUAGES_AVAILABLE[".ts"] = tsts.language_typescript() - LANGUAGES_AVAILABLE[".tsx"] = tsts.language_tsx() - except ImportError: - pass - -# Import our modular components -from .semantic_analysis.comparison import compare_elements +# Import regex-based analyzer from .semantic_analysis.models import ExtractedElement from .semantic_analysis.regex_analyzer import analyze_with_regex -if TREE_SITTER_AVAILABLE: - from .semantic_analysis.js_analyzer import extract_js_elements - from .semantic_analysis.python_analyzer import extract_python_elements - class SemanticAnalyzer: """ - Analyzes code changes at a semantic level. - - Uses tree-sitter for AST-based analysis when available, - falling back to regex-based heuristics when not. + Analyzes code changes at a semantic level using regex-based heuristics. 
Example: analyzer = SemanticAnalyzer() @@ -117,28 +59,8 @@ class SemanticAnalyzer: """ def __init__(self): - """Initialize the analyzer with available parsers.""" - self._parsers: dict[str, Parser] = {} - - debug( - MODULE, - "Initializing SemanticAnalyzer", - tree_sitter_available=TREE_SITTER_AVAILABLE, - ) - - if TREE_SITTER_AVAILABLE: - for ext, lang in LANGUAGES_AVAILABLE.items(): - parser = Parser() - parser.language = Language(lang) - self._parsers[ext] = parser - debug_detailed(MODULE, f"Initialized parser for {ext}") - debug_success( - MODULE, - "SemanticAnalyzer initialized", - parsers=list(self._parsers.keys()), - ) - else: - debug(MODULE, "Using regex-based fallback (tree-sitter not available)") + """Initialize the analyzer.""" + debug(MODULE, "Initializing SemanticAnalyzer (regex-based)") def analyze_diff( self, @@ -171,13 +93,8 @@ def analyze_diff( task_id=task_id, ) - # Use tree-sitter if available for this language - if ext in self._parsers: - debug_detailed(MODULE, f"Using tree-sitter parser for {ext}") - analysis = self._analyze_with_tree_sitter(file_path, before, after, ext) - else: - debug_detailed(MODULE, f"Using regex fallback for {ext}") - analysis = analyze_with_regex(file_path, before, after, ext) + # Use regex-based analysis + analysis = analyze_with_regex(file_path, before, after, ext) debug_success( MODULE, @@ -201,77 +118,6 @@ def analyze_diff( return analysis - def _analyze_with_tree_sitter( - self, - file_path: str, - before: str, - after: str, - ext: str, - ) -> FileAnalysis: - """Analyze using tree-sitter AST parsing.""" - parser = self._parsers[ext] - - tree_before = parser.parse(bytes(before, "utf-8")) - tree_after = parser.parse(bytes(after, "utf-8")) - - # Extract structural elements from both versions - elements_before = self._extract_elements(tree_before, before, ext) - elements_after = self._extract_elements(tree_after, after, ext) - - # Compare and generate semantic changes - changes = compare_elements(elements_before, elements_after, ext) - - # Build the analysis - analysis = FileAnalysis(file_path=file_path, changes=changes) - - # Populate summary fields - for change in changes: - if change.change_type in { - ChangeType.MODIFY_FUNCTION, - ChangeType.ADD_HOOK_CALL, - }: - analysis.functions_modified.add(change.target) - elif change.change_type == ChangeType.ADD_FUNCTION: - analysis.functions_added.add(change.target) - elif change.change_type == ChangeType.ADD_IMPORT: - analysis.imports_added.add(change.target) - elif change.change_type == ChangeType.REMOVE_IMPORT: - analysis.imports_removed.add(change.target) - elif change.change_type in { - ChangeType.MODIFY_CLASS, - ChangeType.ADD_METHOD, - }: - analysis.classes_modified.add(change.target.split(".")[0]) - - analysis.total_lines_changed += change.line_end - change.line_start + 1 - - return analysis - - def _extract_elements( - self, - tree: Tree, - source: str, - ext: str, - ) -> dict[str, ExtractedElement]: - """Extract structural elements from a syntax tree.""" - elements: dict[str, ExtractedElement] = {} - source_bytes = bytes(source, "utf-8") - - def get_text(node: Node) -> str: - return source_bytes[node.start_byte : node.end_byte].decode("utf-8") - - def get_line(byte_pos: int) -> int: - # Convert byte position to line number (1-indexed) - return source[:byte_pos].count("\n") + 1 - - # Language-specific extraction - if ext == ".py": - extract_python_elements(tree.root_node, elements, get_text, get_line) - elif ext in {".js", ".jsx", ".ts", ".tsx"}: - extract_js_elements(tree.root_node, 
elements, get_text, get_line, ext) - - return elements - def analyze_file(self, file_path: str, content: str) -> FileAnalysis: """ Analyze a single file's structure (not a diff). @@ -291,12 +137,7 @@ def analyze_file(self, file_path: str, content: str) -> FileAnalysis: @property def supported_extensions(self) -> set[str]: """Get the set of supported file extensions.""" - if TREE_SITTER_AVAILABLE: - # Tree-sitter extensions plus regex fallbacks - return set(self._parsers.keys()) | {".py", ".js", ".jsx", ".ts", ".tsx"} - else: - # Only regex-supported extensions - return {".py", ".js", ".jsx", ".ts", ".tsx"} + return {".py", ".js", ".jsx", ".ts", ".tsx"} def is_supported(self, file_path: str) -> bool: """Check if a file type is supported for semantic analysis.""" diff --git a/apps/backend/merge/timeline_git.py b/apps/backend/merge/timeline_git.py index ebf0952a22..cc9e6ca6cd 100644 --- a/apps/backend/merge/timeline_git.py +++ b/apps/backend/merge/timeline_git.py @@ -189,7 +189,14 @@ def get_worktree_file_content(self, task_id: str, file_path: str) -> str: task_id.replace("task-", "") if task_id.startswith("task-") else task_id ) - worktree_path = self.project_path / ".worktrees" / spec_name / file_path + worktree_path = ( + self.project_path + / ".auto-claude" + / "worktrees" + / "tasks" + / spec_name + / file_path + ) if worktree_path.exists(): try: return worktree_path.read_text(encoding="utf-8") diff --git a/apps/backend/ollama_model_detector.py b/apps/backend/ollama_model_detector.py index 40819e029c..aaa43883a5 100644 --- a/apps/backend/ollama_model_detector.py +++ b/apps/backend/ollama_model_detector.py @@ -16,6 +16,7 @@ import argparse import json +import re import sys import urllib.error import urllib.request @@ -23,6 +24,10 @@ DEFAULT_OLLAMA_URL = "http://localhost:11434" +# Minimum Ollama version required for newer embedding models (qwen3-embedding, etc.) 
+# These models were added in Ollama 0.10.0 +MIN_OLLAMA_VERSION_FOR_NEW_MODELS = "0.10.0" + # Known embedding models and their dimensions # This list helps identify embedding models from the model name KNOWN_EMBEDDING_MODELS = { @@ -31,10 +36,26 @@ "dim": 768, "description": "Google EmbeddingGemma (lightweight)", }, - "qwen3-embedding": {"dim": 1024, "description": "Qwen3 Embedding (0.6B)"}, - "qwen3-embedding:0.6b": {"dim": 1024, "description": "Qwen3 Embedding 0.6B"}, - "qwen3-embedding:4b": {"dim": 2560, "description": "Qwen3 Embedding 4B"}, - "qwen3-embedding:8b": {"dim": 4096, "description": "Qwen3 Embedding 8B"}, + "qwen3-embedding": { + "dim": 1024, + "description": "Qwen3 Embedding (0.6B)", + "min_version": "0.10.0", + }, + "qwen3-embedding:0.6b": { + "dim": 1024, + "description": "Qwen3 Embedding 0.6B", + "min_version": "0.10.0", + }, + "qwen3-embedding:4b": { + "dim": 2560, + "description": "Qwen3 Embedding 4B", + "min_version": "0.10.0", + }, + "qwen3-embedding:8b": { + "dim": 4096, + "description": "Qwen3 Embedding 8B", + "min_version": "0.10.0", + }, "bge-base-en": {"dim": 768, "description": "BAAI General Embedding - Base"}, "bge-large-en": {"dim": 1024, "description": "BAAI General Embedding - Large"}, "bge-small-en": {"dim": 384, "description": "BAAI General Embedding - Small"}, @@ -63,6 +84,7 @@ "size_estimate": "3.1 GB", "dim": 2560, "badge": "recommended", + "min_ollama_version": "0.10.0", }, { "name": "qwen3-embedding:8b", @@ -70,6 +92,7 @@ "size_estimate": "6.0 GB", "dim": 4096, "badge": "quality", + "min_ollama_version": "0.10.0", }, { "name": "qwen3-embedding:0.6b", @@ -77,6 +100,7 @@ "size_estimate": "494 MB", "dim": 1024, "badge": "fast", + "min_ollama_version": "0.10.0", }, { "name": "embeddinggemma", @@ -112,6 +136,22 @@ ] +def parse_version(version_str: str | None) -> tuple[int, ...]: + """Parse a version string like '0.10.0' into a tuple for comparison.""" + if not version_str or not isinstance(version_str, str): + return (0, 0, 0) + # Extract just the numeric parts (handles versions like "0.10.0-rc1") + match = re.match(r"(\d+)\.(\d+)\.(\d+)", version_str) + if match: + return tuple(int(x) for x in match.groups()) + return (0, 0, 0) + + +def version_gte(version: str | None, min_version: str | None) -> bool: + """Check if version >= min_version.""" + return parse_version(version) >= parse_version(min_version) + + def output_json(success: bool, data: Any = None, error: str | None = None) -> None: """Output JSON result to stdout and exit.""" result = {"success": success} @@ -145,6 +185,14 @@ def fetch_ollama_api(base_url: str, endpoint: str, timeout: int = 5) -> dict | N return None +def get_ollama_version(base_url: str) -> str | None: + """Get the Ollama server version.""" + result = fetch_ollama_api(base_url, "api/version") + if result: + return result.get("version") + return None + + def is_embedding_model(model_name: str) -> bool: """Check if a model name suggests it's an embedding model.""" name_lower = model_name.lower() @@ -192,6 +240,19 @@ def get_embedding_description(model_name: str) -> str: return "Embedding model" +def get_model_min_version(model_name: str) -> str | None: + """Get the minimum Ollama version required for a model.""" + name_lower = model_name.lower() + + # Sort keys by length descending to match more specific names first + # e.g., "qwen3-embedding:8b" before "qwen3-embedding" + for known_model in sorted(KNOWN_EMBEDDING_MODELS.keys(), key=len, reverse=True): + if known_model in name_lower: + return 
KNOWN_EMBEDDING_MODELS[known_model].get("min_version") + + return None + + def cmd_check_status(args) -> None: """Check if Ollama is running and accessible.""" base_url = args.base_url or DEFAULT_OLLAMA_URL @@ -200,12 +261,18 @@ def cmd_check_status(args) -> None: result = fetch_ollama_api(base_url, "api/version") if result: + version = result.get("version", "unknown") output_json( True, data={ "running": True, "url": base_url, - "version": result.get("version", "unknown"), + "version": version, + "supports_new_models": version_gte( + version, MIN_OLLAMA_VERSION_FOR_NEW_MODELS + ) + if version != "unknown" + else None, }, ) else: @@ -319,6 +386,9 @@ def cmd_get_recommended_models(args) -> None: """Get recommended embedding models with install status.""" base_url = args.base_url or DEFAULT_OLLAMA_URL + # Get Ollama version for compatibility checking + ollama_version = get_ollama_version(base_url) + # Get currently installed models result = fetch_ollama_api(base_url, "api/tags") installed_names = set() @@ -330,17 +400,30 @@ def cmd_get_recommended_models(args) -> None: installed_names.add(name) installed_names.add(base_name) - # Build recommended list with install status + # Build recommended list with install status and compatibility recommended = [] for model in RECOMMENDED_EMBEDDING_MODELS: name = model["name"] base_name = name.split(":")[0] if ":" in name else name is_installed = name in installed_names or base_name in installed_names + # Check version compatibility + min_version = model.get("min_ollama_version") + is_compatible = True + compatibility_note = None + if min_version and ollama_version: + is_compatible = version_gte(ollama_version, min_version) + if not is_compatible: + compatibility_note = f"Requires Ollama {min_version}+" + elif min_version and not ollama_version: + compatibility_note = "Version compatibility could not be verified" + recommended.append( { **model, "installed": is_installed, + "compatible": is_compatible, + "compatibility_note": compatibility_note, } ) @@ -350,6 +433,7 @@ def cmd_get_recommended_models(args) -> None: "recommended": recommended, "count": len(recommended), "url": base_url, + "ollama_version": ollama_version, }, ) @@ -363,6 +447,19 @@ def cmd_pull_model(args) -> None: output_error("Model name is required") return + # Check Ollama version compatibility before attempting pull + ollama_version = get_ollama_version(base_url) + min_version = get_model_min_version(model_name) + + if min_version and ollama_version: + if not version_gte(ollama_version, min_version): + output_error( + f"Model '{model_name}' requires Ollama {min_version} or newer. " + f"Your version is {ollama_version}. " + f"Please upgrade Ollama: https://ollama.com/download" + ) + return + try: url = f"{base_url.rstrip('/')}/api/pull" data = json.dumps({"name": model_name}).encode("utf-8") @@ -376,6 +473,22 @@ def cmd_pull_model(args) -> None: try: progress = json.loads(line.decode("utf-8")) + # Check for error in the streaming response + # This handles cases like "requires newer version of Ollama" + if "error" in progress: + error_msg = progress["error"] + # Clean up the error message (remove extra whitespace/newlines) + error_msg = " ".join(error_msg.split()) + # Check if it's a version-related error + if "newer version" in error_msg.lower(): + error_msg = ( + f"Model '{model_name}' requires a newer version of Ollama. " + f"Your version: {ollama_version or 'unknown'}. 
" + f"Please upgrade: https://ollama.com/download" + ) + output_error(error_msg) + return + # Emit progress as NDJSON to stderr for main process to parse if "completed" in progress and "total" in progress: print( diff --git a/apps/backend/phase_config.py b/apps/backend/phase_config.py index f7b85cdee5..3fc9ba74ef 100644 --- a/apps/backend/phase_config.py +++ b/apps/backend/phase_config.py @@ -7,6 +7,7 @@ """ import json +import os from pathlib import Path from typing import Literal, TypedDict @@ -46,10 +47,10 @@ "complexity_assessment": "medium", } -# Default phase configuration (matches UI defaults) +# Default phase configuration (fallback, matches 'Balanced' profile) DEFAULT_PHASE_MODELS: dict[str, str] = { "spec": "sonnet", - "planning": "opus", + "planning": "sonnet", # Changed from "opus" (fix #433) "coding": "sonnet", "qa": "sonnet", } @@ -94,17 +95,34 @@ def resolve_model_id(model: str) -> str: Resolve a model shorthand (haiku, sonnet, opus) to a full model ID. If the model is already a full ID, return it unchanged. + Priority: + 1. Environment variable override (from API Profile) + 2. Hardcoded MODEL_ID_MAP + 3. Pass through unchanged (assume full model ID) + Args: model: Model shorthand or full ID Returns: Full Claude model ID """ - # Check if it's a shorthand + # Check for environment variable override (from API Profile custom model mappings) if model in MODEL_ID_MAP: + env_var_map = { + "haiku": "ANTHROPIC_DEFAULT_HAIKU_MODEL", + "sonnet": "ANTHROPIC_DEFAULT_SONNET_MODEL", + "opus": "ANTHROPIC_DEFAULT_OPUS_MODEL", + } + env_var = env_var_map.get(model) + if env_var: + env_value = os.environ.get(env_var) + if env_value: + return env_value + + # Fall back to hardcoded mapping return MODEL_ID_MAP[model] - # Already a full model ID + # Already a full model ID or unknown shorthand return model diff --git a/apps/backend/project/command_registry/languages.py b/apps/backend/project/command_registry/languages.py index cd10b0d6b1..e91787eb4e 100644 --- a/apps/backend/project/command_registry/languages.py +++ b/apps/backend/project/command_registry/languages.py @@ -173,12 +173,16 @@ "zig", }, "dart": { + # Core Dart CLI (modern unified tool) "dart", + "pub", + # Flutter CLI (included in Dart language for SDK detection) + "flutter", + # Legacy commands (deprecated but may exist in older projects) "dart2js", "dartanalyzer", "dartdoc", "dartfmt", - "pub", }, } diff --git a/apps/backend/project/command_registry/package_managers.py b/apps/backend/project/command_registry/package_managers.py index 46b30b3712..bf6c1d978a 100644 --- a/apps/backend/project/command_registry/package_managers.py +++ b/apps/backend/project/command_registry/package_managers.py @@ -33,6 +33,9 @@ "brew": {"brew"}, "apt": {"apt", "apt-get", "dpkg"}, "nix": {"nix", "nix-shell", "nix-build", "nix-env"}, + # Dart/Flutter package managers + "pub": {"pub", "dart"}, + "melos": {"melos", "dart", "flutter"}, } diff --git a/apps/backend/project/command_registry/version_managers.py b/apps/backend/project/command_registry/version_managers.py index b4356d0449..04e8e3925b 100644 --- a/apps/backend/project/command_registry/version_managers.py +++ b/apps/backend/project/command_registry/version_managers.py @@ -23,6 +23,8 @@ "rustup": {"rustup"}, "sdkman": {"sdk"}, "jabba": {"jabba"}, + # Dart/Flutter version managers + "fvm": {"fvm", "flutter"}, } diff --git a/apps/backend/project/stack_detector.py b/apps/backend/project/stack_detector.py index 051c685c93..0fa67c29b3 100644 --- a/apps/backend/project/stack_detector.py +++ 
b/apps/backend/project/stack_detector.py @@ -164,6 +164,12 @@ def detect_package_managers(self) -> None: if self.parser.file_exists("build.gradle", "build.gradle.kts"): self.stack.package_managers.append("gradle") + # Dart/Flutter package managers + if self.parser.file_exists("pubspec.yaml", "pubspec.lock"): + self.stack.package_managers.append("pub") + if self.parser.file_exists("melos.yaml"): + self.stack.package_managers.append("melos") + def detect_databases(self) -> None: """Detect databases from config files and dependencies.""" # Check for database config files @@ -358,3 +364,6 @@ def detect_version_managers(self) -> None: self.stack.version_managers.append("rbenv") if self.parser.file_exists("rust-toolchain.toml", "rust-toolchain"): self.stack.version_managers.append("rustup") + # Flutter Version Manager + if self.parser.file_exists(".fvm", ".fvmrc", "fvm_config.json"): + self.stack.version_managers.append("fvm") diff --git a/apps/backend/prompts/coder.md b/apps/backend/prompts/coder.md index c9cde7f3c2..8b0acd9ef1 100644 --- a/apps/backend/prompts/coder.md +++ b/apps/backend/prompts/coder.md @@ -22,6 +22,68 @@ environment at the start of each prompt in the "YOUR ENVIRONMENT" section. Pay c --- +## ๐Ÿšจ CRITICAL: PATH CONFUSION PREVENTION ๐Ÿšจ + +**THE #1 BUG IN MONOREPOS: Doubled paths after `cd` commands** + +### The Problem + +After running `cd ./apps/frontend`, your current directory changes. If you then use paths like `apps/frontend/src/file.ts`, you're creating **doubled paths** like `apps/frontend/apps/frontend/src/file.ts`. + +### The Solution: ALWAYS CHECK YOUR CWD + +**BEFORE every git command or file operation:** + +```bash +# Step 1: Check where you are +pwd + +# Step 2: Use paths RELATIVE TO CURRENT DIRECTORY +# If pwd shows: /path/to/project/apps/frontend +# Then use: git add src/file.ts +# NOT: git add apps/frontend/src/file.ts +``` + +### Examples + +**โŒ WRONG - Path gets doubled:** +```bash +cd ./apps/frontend +git add apps/frontend/src/file.ts # Looks for apps/frontend/apps/frontend/src/file.ts +``` + +**โœ… CORRECT - Use relative path from current directory:** +```bash +cd ./apps/frontend +pwd # Shows: /path/to/project/apps/frontend +git add src/file.ts # Correctly adds apps/frontend/src/file.ts from project root +``` + +**โœ… ALSO CORRECT - Stay at root, use full relative path:** +```bash +# Don't change directory at all +git add ./apps/frontend/src/file.ts # Works from project root +``` + +### Mandatory Pre-Command Check + +**Before EVERY git add, git commit, or file operation in a monorepo:** + +```bash +# 1. Where am I? +pwd + +# 2. What files am I targeting? +ls -la [target-path] # Verify the path exists + +# 3. Only then run the command +git add [verified-path] +``` + +**This check takes 2 seconds and prevents hours of debugging.** + +--- + ## STEP 1: GET YOUR BEARINGS (MANDATORY) First, check your environment. The prompt should tell you your working directory and spec location. 
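The pre-command discipline described above (pwd, verify the target, then run the command) can also be scripted. Below is a minimal Python sketch, for illustration only — the `git_add_checked` helper is hypothetical and not part of this repository; it simply encodes the same check-then-act sequence.

```python
from pathlib import Path
import subprocess


def git_add_checked(path_str: str) -> None:
    """Hypothetical helper: verify a path exists relative to the current
    working directory, then stage it. Fails fast on the doubled-path mistake
    described above instead of silently targeting a non-existent file."""
    cwd = Path.cwd()                    # step 1: where am I?
    if not (cwd / path_str).exists():   # step 2: does the target exist from here?
        raise FileNotFoundError(
            f"{path_str!r} not found under {cwd}; "
            "a prior `cd` may have doubled the path"
        )
    subprocess.run(["git", "add", path_str], cwd=cwd, check=True)  # step 3: run it
```

For example, after `cd apps/frontend`, calling `git_add_checked("apps/frontend/src/file.ts")` raises immediately rather than producing the doubled-path failure, while `git_add_checked("src/file.ts")` stages the intended file.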
@@ -358,6 +420,20 @@ In your response, acknowledge the checklist: ## STEP 6: IMPLEMENT THE SUBTASK +### Verify Your Location FIRST + +**MANDATORY: Before implementing anything, confirm where you are:** + +```bash +# This should match the "Working Directory" in YOUR ENVIRONMENT section above +pwd +``` + +If you change directories during implementation (e.g., `cd apps/frontend`), remember: +- Your file paths must be RELATIVE TO YOUR NEW LOCATION +- Before any git operation, run `pwd` again to verify your location +- See the "PATH CONFUSION PREVENTION" section above for examples + ### Mark as In Progress Update `implementation_plan.json`: @@ -618,6 +694,31 @@ After successful verification, update the subtask: ## STEP 9: COMMIT YOUR PROGRESS +### Path Verification (MANDATORY FIRST STEP) + +**๐Ÿšจ BEFORE running ANY git commands, verify your current directory:** + +```bash +# Step 1: Where am I? +pwd + +# Step 2: What files do I want to commit? +# If you changed to a subdirectory (e.g., cd apps/frontend), +# you need to use paths RELATIVE TO THAT DIRECTORY, not from project root + +# Step 3: Verify paths exist +ls -la [path-to-files] # Make sure the path is correct from your current location + +# Example in a monorepo: +# If pwd shows: /project/apps/frontend +# Then use: git add src/file.ts +# NOT: git add apps/frontend/src/file.ts (this would look for apps/frontend/apps/frontend/src/file.ts) +``` + +**CRITICAL RULE:** If you're in a subdirectory, either: +- **Option A:** Return to project root: `cd [back to working directory]` +- **Option B:** Use paths relative to your CURRENT directory (check with `pwd`) + ### Secret Scanning (Automatic) The system **automatically scans for secrets** before every commit. If secrets are detected, the commit will be blocked and you'll receive detailed instructions on how to fix it. @@ -634,7 +735,7 @@ The system **automatically scans for secrets** before every commit. If secrets a api_key = os.environ.get("API_KEY") ``` 3. **Update .env.example** - Add placeholder for the new variable -4. **Re-stage and retry** - `git add . && git commit ...` +4. **Re-stage and retry** - `git add . ':!.auto-claude' && git commit ...` **If it's a false positive:** - Add the file pattern to `.secretsignore` in the project root @@ -643,7 +744,17 @@ The system **automatically scans for secrets** before every commit. If secrets a ### Create the Commit ```bash -git add . +# FIRST: Make sure you're in the working directory root (check YOUR ENVIRONMENT section at top) +pwd # Should match your working directory + +# Add all files EXCEPT .auto-claude directory (spec files should never be committed) +git add . ':!.auto-claude' + +# If git add fails with "pathspec did not match", you have a path problem: +# 1. Run pwd to see where you are +# 2. Run git status to see what git sees +# 3. Adjust your paths accordingly + git commit -m "auto-claude: Complete [subtask-id] - [subtask description] - Files modified: [list] @@ -651,6 +762,9 @@ git commit -m "auto-claude: Complete [subtask-id] - [subtask description] - Phase progress: [X]/[Y] subtasks complete" ``` +**CRITICAL**: The `:!.auto-claude` pathspec exclusion ensures spec files are NEVER committed. +These are internal tracking files that must stay local. + ### DO NOT Push to Remote **IMPORTANT**: Do NOT run `git push`. All work stays local until the user reviews and approves. 
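A quick way to confirm the `:!.auto-claude` exclusion actually worked is to inspect the staged file list before committing. The sketch below is illustrative only — the `assert_no_spec_files_staged` helper is hypothetical, not part of the repository — and simply fails if anything under `.auto-claude/` slipped into the index.

```python
import subprocess


def assert_no_spec_files_staged(repo_dir: str = ".") -> None:
    """Hypothetical check: list staged paths and raise if any internal
    spec-tracking files under .auto-claude/ were staged by mistake."""
    result = subprocess.run(
        ["git", "diff", "--cached", "--name-only"],
        cwd=repo_dir,
        capture_output=True,
        text=True,
        check=True,
    )
    leaked = [p for p in result.stdout.splitlines() if p.startswith(".auto-claude/")]
    if leaked:
        raise RuntimeError(f"Spec files staged by mistake: {leaked}")
```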
@@ -956,6 +1070,17 @@ Prepare โ†’ Test (small batch) โ†’ Execute (full) โ†’ Cleanup - Clean, working state - **Secret scan must pass before commit** +### Git Configuration - NEVER MODIFY +**CRITICAL**: You MUST NOT modify git user configuration. Never run: +- `git config user.name` +- `git config user.email` +- `git config --local user.*` +- `git config --global user.*` + +The repository inherits the user's configured git identity. Creating "Test User" or +any other fake identity breaks attribution and causes serious issues. If you need +to commit changes, use the existing git identity - do NOT set a new one. + ### The Golden Rule **FIX BUGS NOW.** The next session has no memory. diff --git a/apps/backend/prompts/github/pr_codebase_fit_agent.md b/apps/backend/prompts/github/pr_codebase_fit_agent.md index f9e14e1e3f..9a14b56dbc 100644 --- a/apps/backend/prompts/github/pr_codebase_fit_agent.md +++ b/apps/backend/prompts/github/pr_codebase_fit_agent.md @@ -6,6 +6,23 @@ You are a focused codebase fit review agent. You have been spawned by the orches Ensure new code integrates well with the existing codebase. Check for consistency with project conventions, reuse of existing utilities, and architectural alignment. Focus ONLY on codebase fit - not security, logic correctness, or general quality. +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Codebase fit issues in changed code** - New code not following project patterns +2. **Missed reuse opportunities** - "Existing `utils.ts` has a helper for this" +3. **Inconsistent with PR's own changes** - "You used `camelCase` here but `snake_case` elsewhere in the PR" +4. **Breaking conventions in touched areas** - "Your change deviates from the pattern in this file" + +### What is NOT in scope (do NOT report): +1. **Pre-existing inconsistencies** - Old code that doesn't follow patterns +2. **Unrelated suggestions** - Don't suggest patterns for code the PR didn't touch + +**Key distinction:** +- โœ… "Your new component doesn't follow the existing pattern in `components/`" - GOOD +- โœ… "Consider using existing `formatDate()` helper instead of new implementation" - GOOD +- โŒ "The old `legacy/` folder uses different naming conventions" - BAD (pre-existing) + ## Codebase Fit Focus Areas ### 1. Naming Conventions diff --git a/apps/backend/prompts/github/pr_finding_validator.md b/apps/backend/prompts/github/pr_finding_validator.md index b054344ea9..6421e37132 100644 --- a/apps/backend/prompts/github/pr_finding_validator.md +++ b/apps/backend/prompts/github/pr_finding_validator.md @@ -1,16 +1,37 @@ # Finding Validator Agent -You are a finding re-investigator. For each unresolved finding from a previous PR review, you must actively investigate whether it is a REAL issue or a FALSE POSITIVE. +You are a finding re-investigator using EVIDENCE-BASED VALIDATION. For each unresolved finding from a previous PR review, you must actively investigate whether it is a REAL issue or a FALSE POSITIVE. + +**Core Principle: Evidence, not confidence scores.** Either you can prove the issue exists with actual code, or you can't. There is no middle ground. Your job is to prevent false positives from persisting indefinitely by actually reading the code and verifying the issue exists. +## CRITICAL: Check PR Scope First + +**Before investigating any finding, verify it's within THIS PR's scope:** + +1. **Check if the file is in the PR's changed files list** - If not, likely out-of-scope +2. 
**Check if the line number exists** - If finding cites line 710 but file has 600 lines, it's hallucinated +3. **Check for PR references in commit messages** - Commits like `fix: something (#584)` are from OTHER PRs + +**Dismiss findings as `dismissed_false_positive` if:** +- The finding references a file NOT in the PR's changed files list AND is not about impact on that file +- The line number doesn't exist in the file (hallucinated) +- The finding is about code from a merged branch commit (not this PR's work) + +**Keep findings valid if they're about:** +- Issues in code the PR actually changed +- Impact of PR changes on other code (e.g., "this change breaks callers in X") +- Missing updates to related code (e.g., "you updated A but forgot B") + ## Your Mission For each finding you receive: -1. **READ** the actual code at the file/line location using the Read tool -2. **ANALYZE** whether the described issue actually exists in the code -3. **PROVIDE** concrete code evidence for your conclusion -4. **RETURN** validation status with evidence +1. **VERIFY SCOPE** - Is this file/line actually part of this PR? +2. **READ** the actual code at the file/line location using the Read tool +3. **ANALYZE** whether the described issue actually exists in the code +4. **PROVIDE** concrete code evidence - the actual code that proves or disproves the issue +5. **RETURN** validation status with evidence (binary decision based on what the code shows) ## Investigation Process @@ -24,45 +45,61 @@ Read the file: {finding.file} Focus on lines around: {finding.line} ``` -### Step 2: Analyze with Fresh Eyes +### Step 2: Analyze with Fresh Eyes - NEVER ASSUME + +**CRITICAL: Do NOT assume the original finding is correct.** The original reviewer may have: +- Hallucinated line numbers that don't exist +- Misread or misunderstood the code +- Missed validation/sanitization in callers or surrounding code +- Made assumptions without actually reading the implementation +- Confused similar-looking code patterns + +**You MUST actively verify by asking:** +- Does the code at this exact line ACTUALLY have this issue? +- Did I READ the actual implementation, not just the function name? +- Is there validation/sanitization BEFORE this code is reached? +- Is there framework protection I'm not accounting for? +- Does this line number even EXIST in the file? -**Do NOT assume the original finding is correct.** Ask yourself: -- Does the code ACTUALLY have this issue? -- Is the described vulnerability/bug/problem present? -- Could the original reviewer have misunderstood the code? -- Is there context that makes this NOT an issue (e.g., sanitization elsewhere)? +**NEVER:** +- Trust the finding description without reading the code +- Assume a function is vulnerable based on its name +- Skip checking surrounding context (ยฑ20 lines minimum) +- Confirm a finding just because "it sounds plausible" -Be skeptical. The original review may have hallucinated this finding. +Be HIGHLY skeptical. AI reviews frequently produce false positives. Your job is to catch them. 
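One of the verification questions above — does the cited line number even exist — is mechanical enough to sketch. The helper below is hypothetical (it is not part of the review pipeline) and only illustrates how a validator could confirm a finding's location before quoting it as code evidence.

```python
from pathlib import Path


def cited_line(file_path: str, line: int) -> str | None:
    """Return the text at a finding's cited line, or None if the file or
    line number does not exist (i.e., the finding is likely hallucinated)."""
    path = Path(file_path)
    if not path.is_file():
        return None
    lines = path.read_text(encoding="utf-8", errors="replace").splitlines()
    if not 1 <= line <= len(lines):
        return None  # beyond EOF: record evidence_verified_in_file = false
    return lines[line - 1]
```

If this returns None, the finding would be dismissed as `dismissed_false_positive` with `evidence_verified_in_file: false`; if it returns code, that snippet becomes the evidence documented in Step 3 below.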
### Step 3: Document Evidence You MUST provide concrete evidence: -- **Exact code snippet** you examined (copy-paste from the file) +- **Exact code snippet** you examined (copy-paste from the file) - this is the PROOF - **Line numbers** where you found (or didn't find) the issue -- **Your analysis** of whether the issue exists -- **Confidence level** (0.0-1.0) in your conclusion +- **Your analysis** connecting the code to your conclusion +- **Verification flag** - did this code actually exist at the specified location? ## Validation Statuses ### `confirmed_valid` -Use when you verify the issue IS real: +Use when your code evidence PROVES the issue IS real: - The problematic code pattern exists exactly as described -- The vulnerability/bug is present and exploitable +- You can point to the specific lines showing the vulnerability/bug - The code quality issue genuinely impacts the codebase +- **Key question**: Does your code_evidence field contain the actual problematic code? ### `dismissed_false_positive` -Use when you verify the issue does NOT exist: -- The described code pattern is not actually present -- The original finding misunderstood the code -- There is mitigating code that prevents the issue (e.g., input validation elsewhere) -- The finding was based on incorrect assumptions +Use when your code evidence PROVES the issue does NOT exist: +- The described code pattern is not actually present (code_evidence shows different code) +- There is mitigating code that prevents the issue (code_evidence shows the mitigation) +- The finding was based on incorrect assumptions (code_evidence shows reality) +- The line number doesn't exist or contains different code than claimed +- **Key question**: Does your code_evidence field show code that disproves the original finding? ### `needs_human_review` -Use when you cannot determine with confidence: -- The issue requires runtime analysis to verify +Use when you CANNOT find definitive evidence either way: +- The issue requires runtime analysis to verify (static code doesn't prove/disprove) - The code is too complex to analyze statically -- You have conflicting evidence -- Your confidence is below 0.70 +- You found the code but can't determine if it's actually a problem +- **Key question**: Is your code_evidence inconclusive? ## Output Format @@ -75,7 +112,7 @@ Return one result per finding: "code_evidence": "const query = `SELECT * FROM users WHERE id = ${userId}`;", "line_range": [45, 45], "explanation": "SQL injection vulnerability confirmed. User input 'userId' is directly interpolated into the SQL query at line 45 without any sanitization. The query is executed via db.execute() on line 46.", - "confidence": 0.95 + "evidence_verified_in_file": true } ``` @@ -85,8 +122,8 @@ Return one result per finding: "validation_status": "dismissed_false_positive", "code_evidence": "function processInput(data: string): string {\n const sanitized = DOMPurify.sanitize(data);\n return sanitized;\n}", "line_range": [23, 26], - "explanation": "The original finding claimed XSS vulnerability, but the code uses DOMPurify.sanitize() before output. The input is properly sanitized at line 24 before being returned.", - "confidence": 0.88 + "explanation": "The original finding claimed XSS vulnerability, but the code uses DOMPurify.sanitize() before output. The input is properly sanitized at line 24 before being returned. 
The code evidence proves the issue does NOT exist.", + "evidence_verified_in_file": true } ``` @@ -96,38 +133,56 @@ Return one result per finding: "validation_status": "needs_human_review", "code_evidence": "async function handleRequest(req) {\n // Complex async logic...\n}", "line_range": [100, 150], - "explanation": "The original finding claims a race condition, but verifying this requires understanding the runtime behavior and concurrency model. Cannot determine statically.", - "confidence": 0.45 + "explanation": "The original finding claims a race condition, but verifying this requires understanding the runtime behavior and concurrency model. The static code doesn't provide definitive evidence either way.", + "evidence_verified_in_file": true } ``` -## Confidence Guidelines +```json +{ + "finding_id": "HALLUC-004", + "validation_status": "dismissed_false_positive", + "code_evidence": "// Line 710 does not exist - file only has 600 lines", + "line_range": [600, 600], + "explanation": "The original finding claimed an issue at line 710, but the file only has 600 lines. This is a hallucinated finding - the code doesn't exist.", + "evidence_verified_in_file": false +} +``` + +## Evidence Guidelines -Rate your confidence based on how certain you are: +Validation is binary based on what the code evidence shows: -| Confidence | Meaning | -|------------|---------| -| 0.90-1.00 | Definitive evidence - code clearly shows the issue exists/doesn't exist | -| 0.80-0.89 | Strong evidence - high confidence with minor uncertainty | -| 0.70-0.79 | Moderate evidence - likely correct but some ambiguity | -| 0.50-0.69 | Uncertain - use `needs_human_review` | -| Below 0.50 | Insufficient evidence - must use `needs_human_review` | +| Scenario | Status | Evidence Required | +|----------|--------|-------------------| +| Code shows the exact problem claimed | `confirmed_valid` | Problematic code snippet | +| Code shows issue doesn't exist or is mitigated | `dismissed_false_positive` | Code proving issue is absent | +| Code couldn't be found (hallucinated line/file) | `dismissed_false_positive` | Note that code doesn't exist | +| Code found but can't prove/disprove statically | `needs_human_review` | The inconclusive code | -**Minimum thresholds:** -- To confirm as `confirmed_valid`: confidence >= 0.70 -- To dismiss as `dismissed_false_positive`: confidence >= 0.80 (higher bar for dismissal) -- If below thresholds: must use `needs_human_review` +**Decision rules:** +- If `code_evidence` contains problematic code โ†’ `confirmed_valid` +- If `code_evidence` proves issue doesn't exist โ†’ `dismissed_false_positive` +- If `evidence_verified_in_file` is false โ†’ `dismissed_false_positive` (hallucinated finding) +- If you can't determine from the code โ†’ `needs_human_review` ## Common False Positive Patterns Watch for these patterns that often indicate false positives: -1. **Sanitization elsewhere**: Input is validated/sanitized before reaching the flagged code -2. **Internal-only code**: Code only handles trusted internal data, not user input -3. **Framework protection**: Framework provides automatic protection (e.g., ORM parameterization) -4. **Dead code**: The flagged code is never executed in the current codebase -5. **Test code**: The issue is in test files where it's acceptable -6. **Misread syntax**: Original reviewer misunderstood the language syntax +1. **Non-existent line number**: The line number cited doesn't exist or is beyond EOF - hallucinated finding +2. 
**Merged branch code**: Finding is about code from a commit like `fix: something (#584)` - another PR +3. **Pre-existing issue, not impact**: Finding flags old bug in untouched code without showing how PR changes relate +4. **Sanitization elsewhere**: Input is validated/sanitized before reaching the flagged code +5. **Internal-only code**: Code only handles trusted internal data, not user input +6. **Framework protection**: Framework provides automatic protection (e.g., ORM parameterization) +7. **Dead code**: The flagged code is never executed in the current codebase +8. **Test code**: The issue is in test files where it's acceptable +9. **Misread syntax**: Original reviewer misunderstood the language syntax + +**Note**: Findings about files outside the PR's changed list are NOT automatically false positives if they're about: +- Impact of PR changes on that file (e.g., "your change breaks X") +- Missing related updates (e.g., "you forgot to update Y") ## Common Valid Issue Patterns @@ -144,15 +199,16 @@ These patterns often confirm the issue is real: 1. **ALWAYS read the actual code** - Never rely on memory or the original finding description 2. **ALWAYS provide code_evidence** - No empty strings. Quote the actual code. 3. **Be skeptical of original findings** - Many AI reviews produce false positives -4. **Higher bar for dismissal** - Need 0.80 confidence to dismiss (vs 0.70 to confirm) -5. **When uncertain, escalate** - Use `needs_human_review` rather than guessing +4. **Evidence is binary** - The code either shows the problem or it doesn't +5. **When evidence is inconclusive, escalate** - Use `needs_human_review` rather than guessing 6. **Look for mitigations** - Check surrounding code for sanitization/validation 7. **Check the full context** - Read ยฑ20 lines, not just the flagged line +8. 
**Verify code exists** - Set `evidence_verified_in_file` to false if the code/line doesn't exist ## Anti-Patterns to Avoid -- **Trusting the original finding blindly** - Always verify -- **Dismissing without reading code** - Must provide code_evidence -- **Low confidence dismissals** - Needs 0.80+ confidence to dismiss -- **Vague explanations** - Be specific about what you found +- **Trusting the original finding blindly** - Always verify with actual code +- **Dismissing without reading code** - Must provide code_evidence that proves your point +- **Vague explanations** - Be specific about what the code shows and why it proves/disproves the issue - **Missing line numbers** - Always include line_range +- **Speculative conclusions** - Only conclude what the code evidence actually proves diff --git a/apps/backend/prompts/github/pr_followup.md b/apps/backend/prompts/github/pr_followup.md index 1e2fe04efb..423463f05b 100644 --- a/apps/backend/prompts/github/pr_followup.md +++ b/apps/backend/prompts/github/pr_followup.md @@ -71,10 +71,12 @@ Review the diff since the last review for NEW issues: - Regressions that break previously working code - Missing error handling in new code paths -**Apply the 80% confidence threshold:** -- Only report issues you're confident about +**NEVER ASSUME - ALWAYS VERIFY:** +- Actually READ the code before reporting any finding +- Verify the issue exists at the exact line you cite +- Check for validation/mitigation in surrounding code - Don't re-report issues from the previous review -- Focus on genuinely new problems +- Focus on genuinely new problems with code EVIDENCE ### Phase 3: Comment Review @@ -137,11 +139,11 @@ Return a JSON object with this structure: "id": "new-finding-1", "severity": "medium", "category": "security", - "confidence": 0.85, "title": "New hardcoded API key in config", "description": "A new API key was added in config.ts line 45 without using environment variables.", "file": "src/config.ts", "line": 45, + "evidence": "const API_KEY = 'sk-prod-abc123xyz789';", "suggested_fix": "Move to environment variable: process.env.EXTERNAL_API_KEY" } ], @@ -175,11 +177,11 @@ Same format as initial review findings: - **id**: Unique identifier for new finding - **severity**: `critical` | `high` | `medium` | `low` - **category**: `security` | `quality` | `logic` | `test` | `docs` | `pattern` | `performance` -- **confidence**: Float 0.80-1.0 - **title**: Short summary (max 80 chars) - **description**: Detailed explanation - **file**: Relative file path - **line**: Line number +- **evidence**: **REQUIRED** - Actual code snippet proving the issue exists - **suggested_fix**: How to resolve ### verdict diff --git a/apps/backend/prompts/github/pr_followup_newcode_agent.md b/apps/backend/prompts/github/pr_followup_newcode_agent.md index c35e84f876..5021113b97 100644 --- a/apps/backend/prompts/github/pr_followup_newcode_agent.md +++ b/apps/backend/prompts/github/pr_followup_newcode_agent.md @@ -11,6 +11,23 @@ Review the incremental diff for: 4. Potential regressions 5. Incomplete implementations +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Issues in changed code** - Problems in files/lines actually modified by this PR +2. **Impact on unchanged code** - "This change breaks callers in `other_file.ts`" +3. **Missing related changes** - "Similar pattern in `utils.ts` wasn't updated" +4. **Incomplete implementations** - "New field added but not handled in serializer" + +### What is NOT in scope (do NOT report): +1. 
**Pre-existing bugs** - Old bugs in code this PR didn't touch +2. **Code from merged branches** - Commits with PR references like `(#584)` are from other PRs +3. **Unrelated improvements** - Don't suggest refactoring untouched code + +**Key distinction:** +- โœ… "Your change breaks the caller in `auth.ts`" - GOOD (impact analysis) +- โŒ "The old code in `legacy.ts` has a bug" - BAD (pre-existing, not this PR) + ## Focus Areas Since this is a follow-up review, focus on: @@ -74,15 +91,47 @@ Since this is a follow-up review, focus on: - Minor optimizations - Documentation gaps -## Confidence Scoring +## NEVER ASSUME - ALWAYS VERIFY + +**Before reporting ANY new finding:** + +1. **NEVER assume code is vulnerable** - Read the actual implementation +2. **NEVER assume validation is missing** - Check callers and surrounding code +3. **NEVER assume based on function names** - `unsafeQuery()` might actually be safe +4. **NEVER report without reading the code** - Verify the issue exists at the exact line + +**You MUST:** +- Actually READ the code at the file/line you cite +- Verify there's no sanitization/validation before this code +- Check for framework protections you might miss +- Provide the actual code snippet as evidence + +### Verify Before Reporting "Missing" Safeguards + +For findings claiming something is **missing** (no fallback, no validation, no error handling): + +**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?" + +- Read the **complete function/method** containing the issue, not just the flagged line +- Check for guards, fallbacks, or defensive code that may appear later in the function +- Look for comments indicating intentional design choices +- If uncertain, use the Read/Grep tools to confirm + +**Your evidence must prove absence exists โ€” not just that you didn't see it.** + +โŒ **Weak**: "The code defaults to 'main' without checking if it exists" +โœ… **Strong**: "I read the complete `_detect_target_branch()` function. There is no existence check before the default return." + +**Only report if you can confidently say**: "I verified the complete scope and the safeguard does not exist." + +## Evidence Requirements -Rate confidence (0.0-1.0) based on: -- **>0.9**: Obvious, verifiable issue -- **0.8-0.9**: High confidence with clear evidence -- **0.7-0.8**: Likely issue but some uncertainty -- **<0.7**: Possible issue, needs verification +Every finding MUST include an `evidence` field with: +- The actual problematic code copy-pasted from the diff +- The specific line numbers where the issue exists +- Proof that the issue is real, not speculative -Only report findings with confidence >0.7. +**No evidence = No finding** ## Output Format @@ -99,7 +148,7 @@ Return findings in this structure: "description": "The new login validation query concatenates user input directly into the SQL string without sanitization.", "category": "security", "severity": "critical", - "confidence": 0.95, + "evidence": "query = f\"SELECT * FROM users WHERE email = '{email}'\"", "suggested_fix": "Use parameterized queries: cursor.execute('SELECT * FROM users WHERE email = ?', (email,))", "fixable": true, "source_agent": "new-code-reviewer", @@ -113,7 +162,7 @@ Return findings in this structure: "description": "The fix for LOGIC-003 removed a null check that was protecting against undefined input. 
Now input.data can be null.", "category": "regression", "severity": "high", - "confidence": 0.88, + "evidence": "result = input.data.process() # input.data can be null, was previously: if input and input.data:", "suggested_fix": "Restore null check: if (input && input.data) { ... }", "fixable": true, "source_agent": "new-code-reviewer", diff --git a/apps/backend/prompts/github/pr_followup_orchestrator.md b/apps/backend/prompts/github/pr_followup_orchestrator.md index da2ee6b97a..4e714df4c3 100644 --- a/apps/backend/prompts/github/pr_followup_orchestrator.md +++ b/apps/backend/prompts/github/pr_followup_orchestrator.md @@ -9,6 +9,40 @@ Perform a focused, efficient follow-up review by: 2. Delegating to specialized agents based on what needs verification 3. Synthesizing findings into a final merge verdict +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Issues in changed code** - Problems in files/lines actually modified by this PR +2. **Impact on unchanged code** - "You changed X but forgot to update Y that depends on it" +3. **Missing related changes** - "This pattern also exists in Z, did you mean to update it too?" +4. **Breaking changes** - "This change breaks callers in other files" + +### What is NOT in scope (do NOT report): +1. **Pre-existing issues in unchanged code** - If old code has a bug but this PR didn't touch it, don't flag it +2. **Code from merged branches** - Commits with PR references like `(#584)` are from OTHER already-reviewed PRs +3. **Unrelated improvements** - Don't suggest refactoring code the PR didn't touch + +**Key distinction:** +- โœ… "Your change to `validateUser()` breaks the caller in `auth.ts:45`" - GOOD (impact of PR changes) +- โœ… "You updated this validation but similar logic in `utils.ts` wasn't updated" - GOOD (incomplete change) +- โŒ "The existing code in `legacy.ts` has a SQL injection" - BAD (pre-existing issue, not this PR) +- โŒ "This code from commit `fix: something (#584)` has an issue" - BAD (different PR) + +**Why this matters:** +When authors merge the base branch into their feature branch, the commit range includes commits from other PRs. The context gathering system filters these out, but if any slip through, recognize them as out-of-scope. + +## Merge Conflicts + +**Check for merge conflicts in the follow-up context.** If `has_merge_conflicts` is `true`: + +1. **Report this prominently** - Merge conflicts block the PR from being merged +2. **Add a CRITICAL finding** with category "merge_conflict" and severity "critical" +3. **Include in verdict reasoning** - The PR cannot be merged until conflicts are resolved +4. **This may be NEW since last review** - Base branch may have changed + +Note: GitHub's API tells us IF there are conflicts but not WHICH files. The finding should state: +> "This PR has merge conflicts with the base branch that must be resolved before merging." 
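For background on the `has_merge_conflicts` flag: GitHub reports mergeability at the PR level, not per file. A minimal sketch of inspecting that state manually with the `gh` CLI (assuming an authenticated `gh`; the PR number is a placeholder, and the output shown is illustrative):

```bash
# Query the PR's mergeability from GitHub; "CONFLICTING" (mergeable) or
# "DIRTY" (mergeStateStatus) indicate unresolved merge conflicts.
# 123 is a placeholder PR number.
gh pr view 123 --json mergeable,mergeStateStatus
# Illustrative output: {"mergeable":"CONFLICTING","mergeStateStatus":"DIRTY"}
```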
+ ## Available Specialist Agents You have access to these specialist agents via the Task tool: @@ -97,7 +131,21 @@ After all agents complete: ## Verdict Guidelines +### CRITICAL: CI Status ALWAYS Factors Into Verdict + +**CI status is provided in the context and MUST be considered:** + +- โŒ **Failing CI = BLOCKED** - If ANY CI checks are failing, verdict MUST be BLOCKED regardless of code quality +- โณ **Pending CI = NEEDS_REVISION** - If CI is still running, verdict cannot be READY_TO_MERGE +- โธ๏ธ **Awaiting approval = BLOCKED** - Fork PR workflows awaiting maintainer approval block merge +- โœ… **All passing = Continue with code analysis** - Only then do code findings determine verdict + +**Always mention CI status in your verdict_reasoning.** For example: +- "BLOCKED: 2 CI checks failing (CodeQL, test-frontend). Fix CI before merge." +- "READY_TO_MERGE: All CI checks passing and all findings resolved." + ### READY_TO_MERGE +- **All CI checks passing** (no failing, no pending) - All previous findings verified as resolved OR dismissed as false positives - No CONFIRMED_VALID critical/high issues remaining - No new critical/high issues @@ -105,11 +153,13 @@ After all agents complete: - Contributor questions addressed ### MERGE_WITH_CHANGES +- **All CI checks passing** - Previous findings resolved - Only LOW severity new issues (suggestions) - Optional polish items can be addressed post-merge ### NEEDS_REVISION (Strict Quality Gates) +- **CI checks pending** OR - HIGH or MEDIUM severity findings CONFIRMED_VALID (not dismissed as false positive) - New HIGH or MEDIUM severity issues introduced - Important contributor concerns unaddressed @@ -117,6 +167,8 @@ After all agents complete: - **Note: Only count findings that passed validation** (dismissed_false_positive findings don't block) ### BLOCKED +- **Any CI checks failing** OR +- **Workflows awaiting maintainer approval** (fork PRs) OR - CRITICAL findings remain CONFIRMED_VALID (not dismissed as false positive) - New CRITICAL issues introduced - Fundamental problems with the fix approach @@ -171,16 +223,36 @@ Provide your synthesis as a structured response matching the ParallelFollowupRes } ``` +## CRITICAL: NEVER ASSUME - ALWAYS VERIFY + +**This applies to ALL agents you invoke:** + +1. **NEVER assume a finding is valid** - The finding-validator MUST read the actual code +2. **NEVER assume a fix is correct** - The resolution-verifier MUST verify the change +3. **NEVER assume line numbers are accurate** - Files may be shorter than cited lines +4. **NEVER assume validation is missing** - Check callers and surrounding code +5. **NEVER trust the original finding's description** - It may have been hallucinated + +**Before ANY finding blocks merge:** +- The actual code at that location MUST be read +- The problematic pattern MUST exist as described +- There MUST NOT be mitigation/validation elsewhere +- The evidence MUST be copy-pasted from the actual file + +**Why this matters:** AI reviewers sometimes hallucinate findings. Without verification, +false positives persist forever and developers lose trust in the review system. + ## Important Notes 1. **Be efficient**: Follow-up reviews should be faster than initial reviews 2. **Focus on changes**: Only review what changed since last review -3. **Trust but verify**: Don't assume fixes are correct just because files changed +3. **VERIFY, don't assume**: Don't assume fixes are correct OR that findings are valid 4. **Acknowledge progress**: Recognize genuine effort to address feedback 5. 
**Be specific**: Clearly state what blocks merge if verdict is not READY_TO_MERGE ## Context You Will Receive +- **CI Status (CRITICAL)** - Passing/failing/pending checks and specific failed check names - Previous review summary and findings - New commits since last review (SHAs, messages) - Diff of changes since last review diff --git a/apps/backend/prompts/github/pr_followup_resolution_agent.md b/apps/backend/prompts/github/pr_followup_resolution_agent.md index c0e4c38f15..9e35b827db 100644 --- a/apps/backend/prompts/github/pr_followup_resolution_agent.md +++ b/apps/backend/prompts/github/pr_followup_resolution_agent.md @@ -10,6 +10,23 @@ For each previous finding, determine whether it has been: - **unresolved**: The issue remains or wasn't addressed - **cant_verify**: Not enough information to determine status +## CRITICAL: Verify Finding is In-Scope + +**Before verifying any finding, check if it's within THIS PR's scope:** + +1. **Is the file in the PR's changed files list?** - If not AND the finding isn't about impact, mark as `cant_verify` +2. **Does the line number exist?** - If finding cites line 710 but file has 600 lines, it was hallucinated +3. **Was this from a merged branch?** - Commits with PR references like `(#584)` are from other PRs + +**Mark as `cant_verify` if:** +- Finding references a file not in PR AND is not about impact of PR changes on that file +- Line number doesn't exist (hallucinated finding) +- Finding is about code from another PR's commits + +**Findings can reference files outside the PR if they're about:** +- Impact of PR changes (e.g., "change to X breaks caller in Y") +- Missing related updates (e.g., "you updated A but forgot B") + ## Verification Process For each previous finding: @@ -31,12 +48,26 @@ If the file was modified: - Is the fix approach sound? - Are there edge cases the fix misses? -### 4. Assign Confidence -Rate your confidence (0.0-1.0): -- **>0.9**: Clear evidence of resolution/non-resolution -- **0.7-0.9**: Strong indicators but some uncertainty -- **0.5-0.7**: Mixed signals, moderate confidence -- **<0.5**: Unclear, consider marking as cant_verify +### 4. Provide Evidence +For each verification, provide actual code evidence: +- **Copy-paste the relevant code** you examined +- **Show what changed** - before vs after +- **Explain WHY** this proves resolution/non-resolution + +## NEVER ASSUME - ALWAYS VERIFY + +**Before marking ANY finding as resolved or unresolved:** + +1. **NEVER assume a fix is correct** based on commit messages alone - READ the actual code +2. **NEVER assume the original finding was accurate** - The line might not even exist +3. **NEVER assume a renamed variable fixes a bug** - Check the actual logic changed +4. **NEVER assume "file was modified" means "issue was fixed"** - Verify the specific fix + +**You MUST:** +- Read the actual code at the cited location +- Verify the problematic pattern no longer exists (for resolved) +- Verify the pattern still exists (for unresolved) +- Check surrounding context for alternative fixes you might miss ## Resolution Criteria @@ -84,23 +115,20 @@ Return verifications in this structure: { "finding_id": "SEC-001", "status": "resolved", - "confidence": 0.92, - "evidence": "The SQL query at line 45 now uses parameterized queries instead of string concatenation. 
The fix properly escapes all user inputs.", - "resolution_notes": "Changed from f-string to cursor.execute() with parameters" + "evidence": "cursor.execute('SELECT * FROM users WHERE id = ?', (user_id,))", + "resolution_notes": "Changed from f-string to cursor.execute() with parameters. The code at line 45 now uses parameterized queries." }, { "finding_id": "QUAL-002", "status": "partially_resolved", - "confidence": 0.75, - "evidence": "Error handling was added for the main path, but the fallback path at line 78 still lacks try-catch.", + "evidence": "try:\n result = process(data)\nexcept Exception as e:\n log.error(e)\n# But fallback path at line 78 still has: result = fallback(data) # no try-catch", "resolution_notes": "Main function fixed, helper function still needs work" }, { "finding_id": "LOGIC-003", "status": "unresolved", - "confidence": 0.88, - "evidence": "The off-by-one error remains. The loop still uses `<= length` instead of `< length`.", - "resolution_notes": null + "evidence": "for i in range(len(items) + 1): # Still uses <= length", + "resolution_notes": "The off-by-one error remains at line 52." } ] ``` diff --git a/apps/backend/prompts/github/pr_logic_agent.md b/apps/backend/prompts/github/pr_logic_agent.md index 5b81b2bd6a..328ba13d06 100644 --- a/apps/backend/prompts/github/pr_logic_agent.md +++ b/apps/backend/prompts/github/pr_logic_agent.md @@ -6,6 +6,23 @@ You are a focused logic and correctness review agent. You have been spawned by t Verify that the code logic is correct, handles all edge cases, and doesn't introduce subtle bugs. Focus ONLY on logic and correctness issues - not style, security, or general quality. +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Logic issues in changed code** - Bugs in files/lines modified by this PR +2. **Logic impact of changes** - "This change breaks the assumption in `caller.ts:50`" +3. **Incomplete state changes** - "You updated state X but forgot to reset Y" +4. **Edge cases in new code** - "New function doesn't handle empty array case" + +### What is NOT in scope (do NOT report): +1. **Pre-existing bugs** - Old logic issues in untouched code +2. **Unrelated improvements** - Don't suggest fixing bugs in code the PR didn't touch + +**Key distinction:** +- โœ… "Your change to `sort()` breaks callers expecting stable order" - GOOD (impact analysis) +- โœ… "Off-by-one error in your new loop" - GOOD (new code) +- โŒ "The old `parser.ts` has a race condition" - BAD (pre-existing, not this PR) + ## Logic Focus Areas ### 1. Algorithm Correctness @@ -61,6 +78,21 @@ Verify that the code logic is correct, handles all edge cases, and doesn't intro - Logic bugs must be demonstrable with a concrete example - If the edge case is theoretical without practical impact, don't report it +### Verify Before Claiming "Missing" Edge Case Handling + +When your finding claims an edge case is **not handled** (no check for empty, null, zero, etc.): + +**Ask yourself**: "Have I verified this case isn't handled, or did I just not see it?" + +- Read the **complete function** โ€” guards often appear later or at the start +- Check callers โ€” the edge case might be prevented by caller validation +- Look for early returns, assertions, or type guards you might have missed + +**Your evidence must prove absence โ€” not just that you didn't see it.** + +โŒ **Weak**: "Empty array case is not handled" +โœ… **Strong**: "I read the complete function (lines 12-45). 
There's no check for empty arrays, and the code directly accesses `arr[0]` on line 15 without any guard." + ### Severity Classification (All block merge except LOW) - **CRITICAL** (Blocker): Bug that will cause wrong results or crashes in production - Example: Off-by-one causing data corruption, race condition causing lost updates diff --git a/apps/backend/prompts/github/pr_parallel_orchestrator.md b/apps/backend/prompts/github/pr_parallel_orchestrator.md index fbe34fb930..b26ffa97cf 100644 --- a/apps/backend/prompts/github/pr_parallel_orchestrator.md +++ b/apps/backend/prompts/github/pr_parallel_orchestrator.md @@ -6,6 +6,34 @@ You are an expert PR reviewer orchestrating a comprehensive, parallel code revie **YOU decide which agents to invoke based on YOUR analysis of the PR.** There are no programmatic rules - you evaluate the PR's content, complexity, and risk areas, then delegate to the appropriate specialists. +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Issues in changed code** - Problems in files/lines actually modified by this PR +2. **Impact on unchanged code** - "You changed X but forgot to update Y that depends on it" +3. **Missing related changes** - "This pattern also exists in Z, did you mean to update it too?" +4. **Breaking changes** - "This change breaks callers in other files" + +### What is NOT in scope (do NOT report): +1. **Pre-existing issues** - Old bugs/issues in code this PR didn't touch +2. **Unrelated improvements** - Don't suggest refactoring untouched code + +**Key distinction:** +- โœ… "Your change to `validateUser()` breaks the caller in `auth.ts:45`" - GOOD (impact of PR) +- โœ… "You updated this validation but similar logic in `utils.ts` wasn't updated" - GOOD (incomplete) +- โŒ "The existing code in `legacy.ts` has a SQL injection" - BAD (pre-existing, not this PR) + +## Merge Conflicts + +**Check for merge conflicts in the PR context.** If `has_merge_conflicts` is `true`: + +1. **Report this prominently** - Merge conflicts block the PR from being merged +2. **Add a CRITICAL finding** with category "merge_conflict" and severity "critical" +3. **Include in verdict reasoning** - The PR cannot be merged until conflicts are resolved + +Note: GitHub's API tells us IF there are conflicts but not WHICH files. The finding should state: +> "This PR has merge conflicts with the base branch that must be resolved before merging." + ## Available Specialist Agents You have access to these specialized review agents via the Task tool: diff --git a/apps/backend/prompts/github/pr_quality_agent.md b/apps/backend/prompts/github/pr_quality_agent.md index f3007f1f81..7a3445fce6 100644 --- a/apps/backend/prompts/github/pr_quality_agent.md +++ b/apps/backend/prompts/github/pr_quality_agent.md @@ -6,6 +6,23 @@ You are a focused code quality review agent. You have been spawned by the orches Perform a thorough code quality review of the provided code changes. Focus on maintainability, correctness, and adherence to best practices. +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Quality issues in changed code** - Problems in files/lines modified by this PR +2. **Quality impact of changes** - "This change increases complexity of `handler.ts`" +3. **Incomplete refactoring** - "You cleaned up X but similar pattern in Y wasn't updated" +4. **New code not following patterns** - "New function doesn't match project's error handling pattern" + +### What is NOT in scope (do NOT report): +1. 
**Pre-existing quality issues** - Old code smells in untouched code +2. **Unrelated improvements** - Don't suggest refactoring code the PR didn't touch + +**Key distinction:** +- โœ… "Your new function has high cyclomatic complexity" - GOOD (new code) +- โœ… "This duplicates existing helper in `utils.ts`, consider reusing it" - GOOD (guidance) +- โŒ "The old `legacy.ts` file has 1000 lines" - BAD (pre-existing, not this PR) + ## Quality Focus Areas ### 1. Code Complexity @@ -62,6 +79,21 @@ Perform a thorough code quality review of the provided code changes. Focus on ma - If it's subjective or debatable, don't report it - Focus on objective quality issues +### Verify Before Claiming "Missing" Handling + +When your finding claims something is **missing** (no error handling, no fallback, no cleanup): + +**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?" + +- Read the **complete function**, not just the flagged line โ€” error handling often appears later +- Check for try/catch blocks, guards, or fallbacks you might have missed +- Look for framework-level handling (global error handlers, middleware) + +**Your evidence must prove absence โ€” not just that you didn't see it.** + +โŒ **Weak**: "This async call has no error handling" +โœ… **Strong**: "I read the complete `processOrder()` function (lines 34-89). The `fetch()` call on line 45 has no try/catch, and there's no `.catch()` anywhere in the function." + ### Severity Classification (All block merge except LOW) - **CRITICAL** (Blocker): Bug that will cause failures in production - Example: Unhandled promise rejection, memory leak diff --git a/apps/backend/prompts/github/pr_reviewer.md b/apps/backend/prompts/github/pr_reviewer.md index 72a8b5dada..93d16ec4cb 100644 --- a/apps/backend/prompts/github/pr_reviewer.md +++ b/apps/backend/prompts/github/pr_reviewer.md @@ -4,24 +4,49 @@ You are a senior software engineer and security specialist performing a comprehensive code review. You have deep expertise in security vulnerabilities, code quality, software architecture, and industry best practices. Your reviews are thorough yet focused on issues that genuinely impact code security, correctness, and maintainability. -## Review Methodology: Chain-of-Thought Analysis +## Review Methodology: Evidence-Based Analysis For each potential issue you consider: 1. **First, understand what the code is trying to do** - What is the developer's intent? What problem are they solving? 2. **Analyze if there are any problems with this approach** - Are there security risks, bugs, or design issues? 3. **Assess the severity and real-world impact** - Can this be exploited? Will this cause production issues? How likely is it to occur? -4. **Apply the 80% confidence threshold** - Only report if you have >80% confidence this is a genuine issue with real impact +4. **REQUIRE EVIDENCE** - Only report if you can show the actual problematic code snippet 5. **Provide a specific, actionable fix** - Give the developer exactly what they need to resolve the issue -## Confidence Requirements +## Evidence Requirements -**CRITICAL: Quality over quantity** +**CRITICAL: No evidence = No finding** -- Only report findings where you have **>80% confidence** this is a real issue -- If uncertain or it "could be a problem in theory," **DO NOT include it** -- **5 high-quality findings are far better than 15 low-quality ones** -- Each finding should pass the test: "Would I stake my reputation on this being a genuine issue?" 
+- **Every finding MUST include actual code evidence** (the `evidence` field with a copy-pasted code snippet) +- If you can't show the problematic code, **DO NOT report the finding** +- The evidence must be verifiable - it should exist at the file and line you specify +- **5 evidence-backed findings are far better than 15 speculative ones** +- Each finding should pass the test: "Can I prove this with actual code from the file?" + +## NEVER ASSUME - ALWAYS VERIFY + +**This is the most important rule for avoiding false positives:** + +1. **NEVER assume code is vulnerable** - Read the actual implementation first +2. **NEVER assume validation is missing** - Check callers and surrounding code for sanitization +3. **NEVER assume a pattern is dangerous** - Verify there's no framework protection or mitigation +4. **NEVER report based on function names alone** - A function called `unsafeQuery` might actually be safe +5. **NEVER extrapolate from one line** - Read ยฑ20 lines of context minimum + +**Before reporting ANY finding, you MUST:** +- Actually read the code at the file/line you're about to cite +- Verify the problematic pattern exists exactly as you describe +- Check if there's validation/sanitization before or after +- Confirm the code path is actually reachable +- Verify the line number exists (file might be shorter than you think) + +**Common false positive causes to avoid:** +- Reporting line 500 when the file only has 400 lines (hallucination) +- Claiming "no validation" when validation exists in the caller +- Flagging parameterized queries as SQL injection (framework protection) +- Reporting XSS when output is auto-escaped by the framework +- Citing code that was already fixed in an earlier commit ## Anti-Patterns to Avoid @@ -214,14 +239,13 @@ Return a JSON array with this structure: "id": "finding-1", "severity": "critical", "category": "security", - "confidence": 0.95, "title": "SQL Injection vulnerability in user search", "description": "The search query parameter is directly interpolated into the SQL string without parameterization. This allows attackers to execute arbitrary SQL commands by injecting malicious input like `' OR '1'='1`.", "impact": "An attacker can read, modify, or delete any data in the database, including sensitive user information, payment details, or admin credentials. This could lead to complete data breach.", "file": "src/api/users.ts", "line": 42, "end_line": 45, - "code_snippet": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`", + "evidence": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`", "suggested_fix": "Use parameterized queries to prevent SQL injection:\n\nconst query = 'SELECT * FROM users WHERE name LIKE ?';\nconst results = await db.query(query, [`%${searchTerm}%`]);", "fixable": true, "references": ["https://owasp.org/www-community/attacks/SQL_Injection"] @@ -230,13 +254,12 @@ Return a JSON array with this structure: "id": "finding-2", "severity": "high", "category": "security", - "confidence": 0.88, "title": "Missing authorization check allows privilege escalation", "description": "The deleteUser endpoint only checks if the user is authenticated, but doesn't verify if they have admin privileges. 
Any logged-in user can delete other user accounts.", "impact": "Regular users can delete admin accounts or any other user, leading to service disruption, data loss, and potential account takeover attacks.", "file": "src/api/admin.ts", "line": 78, - "code_snippet": "router.delete('/users/:id', authenticate, async (req, res) => {\n await User.delete(req.params.id);\n});", + "evidence": "router.delete('/users/:id', authenticate, async (req, res) => {\n await User.delete(req.params.id);\n});", "suggested_fix": "Add authorization check:\n\nrouter.delete('/users/:id', authenticate, requireAdmin, async (req, res) => {\n await User.delete(req.params.id);\n});\n\n// Or inline:\nif (!req.user.isAdmin) {\n return res.status(403).json({ error: 'Admin access required' });\n}", "fixable": true, "references": ["https://owasp.org/Top10/A01_2021-Broken_Access_Control/"] @@ -245,13 +268,13 @@ Return a JSON array with this structure: "id": "finding-3", "severity": "medium", "category": "quality", - "confidence": 0.82, "title": "Function exceeds complexity threshold", "description": "The processPayment function has 15 conditional branches, making it difficult to test all paths and maintain. High cyclomatic complexity increases bug risk.", "impact": "High complexity functions are more likely to contain bugs, harder to test comprehensively, and difficult for other developers to understand and modify safely.", "file": "src/payments/processor.ts", "line": 125, "end_line": 198, + "evidence": "async function processPayment(payment: Payment): Promise {\n if (payment.type === 'credit') { ... } else if (payment.type === 'debit') { ... }\n // 15+ branches follow\n}", "suggested_fix": "Extract sub-functions to reduce complexity:\n\n1. validatePaymentData(payment) - handle all validation\n2. calculateFees(amount, type) - fee calculation logic\n3. processRefund(payment) - refund-specific logic\n4. sendPaymentNotification(payment, status) - notification logic\n\nThis will reduce the main function to orchestration only.", "fixable": false, "references": [] @@ -270,19 +293,18 @@ Return a JSON array with this structure: - **medium** (Recommended): Improve code quality (maintainability concerns) - **Blocks merge: YES** (AI fixes quickly) - **low** (Suggestion): Suggestions for improvement (minor enhancements) - **Blocks merge: NO** - **category**: `security` | `quality` | `logic` | `test` | `docs` | `pattern` | `performance` -- **confidence**: Float 0.0-1.0 representing your confidence this is a genuine issue (must be โ‰ฅ0.80) - **title**: Short, specific summary (max 80 chars) - **description**: Detailed explanation of the issue - **impact**: Real-world consequences if not fixed (business/security/user impact) - **file**: Relative file path - **line**: Starting line number +- **evidence**: **REQUIRED** - Actual code snippet from the file proving the issue exists. Must be copy-pasted from the actual code. - **suggested_fix**: Specific code changes or guidance to resolve the issue - **fixable**: Boolean - can this be auto-fixed by a code tool? ### Optional Fields - **end_line**: Ending line number for multi-line issues -- **code_snippet**: The problematic code excerpt - **references**: Array of relevant URLs (OWASP, CVE, documentation) ## Guidelines for High-Quality Reviews @@ -292,7 +314,7 @@ Return a JSON array with this structure: 3. **Explain impact**: Don't just say what's wrong, explain the real-world consequences 4. **Prioritize ruthlessly**: Focus on issues that genuinely matter 5. 
**Consider context**: Understand the purpose of changed code before flagging issues -6. **Validate confidence**: If you're not >80% sure, don't report it +6. **Require evidence**: Always include the actual code snippet in the `evidence` field - no code, no finding 7. **Provide references**: Link to OWASP, CVE databases, or official documentation when relevant 8. **Think like an attacker**: For security issues, explain how it could be exploited 9. **Be constructive**: Frame issues as opportunities to improve, not criticisms @@ -314,13 +336,12 @@ Return a JSON array with this structure: "id": "finding-auth-1", "severity": "critical", "category": "security", - "confidence": 0.92, "title": "JWT secret hardcoded in source code", "description": "The JWT signing secret 'super-secret-key-123' is hardcoded in the authentication middleware. Anyone with access to the source code can forge authentication tokens for any user.", "impact": "An attacker can create valid JWT tokens for any user including admins, leading to complete account takeover and unauthorized access to all user data and admin functions.", "file": "src/middleware/auth.ts", "line": 12, - "code_snippet": "const SECRET = 'super-secret-key-123';\njwt.sign(payload, SECRET);", + "evidence": "const SECRET = 'super-secret-key-123';\njwt.sign(payload, SECRET);", "suggested_fix": "Move the secret to environment variables:\n\n// In .env file:\nJWT_SECRET=\n\n// In auth.ts:\nconst SECRET = process.env.JWT_SECRET;\nif (!SECRET) {\n throw new Error('JWT_SECRET not configured');\n}\njwt.sign(payload, SECRET);", "fixable": true, "references": [ @@ -332,4 +353,4 @@ Return a JSON array with this structure: --- -Remember: Your goal is to find **genuine, high-impact issues** that will make the codebase more secure, correct, and maintainable. Quality over quantity. Be thorough but focused. +Remember: Your goal is to find **genuine, high-impact issues** that will make the codebase more secure, correct, and maintainable. **Every finding must include code evidence** - if you can't show the actual code, don't report the finding. Quality over quantity. Be thorough but focused. diff --git a/apps/backend/prompts/github/pr_security_agent.md b/apps/backend/prompts/github/pr_security_agent.md index e2c3ae3686..15061038b4 100644 --- a/apps/backend/prompts/github/pr_security_agent.md +++ b/apps/backend/prompts/github/pr_security_agent.md @@ -6,6 +6,23 @@ You are a focused security review agent. You have been spawned by the orchestrat Perform a thorough security review of the provided code changes, focusing ONLY on security vulnerabilities. Do not review code quality, style, or other non-security concerns. +## CRITICAL: PR Scope and Context + +### What IS in scope (report these issues): +1. **Security issues in changed code** - Vulnerabilities introduced or modified by this PR +2. **Security impact of changes** - "This change exposes sensitive data to the new endpoint" +3. **Missing security for new features** - "New API endpoint lacks authentication" +4. **Broken security assumptions** - "Change to auth.ts invalidates security check in handler.ts" + +### What is NOT in scope (do NOT report): +1. **Pre-existing vulnerabilities** - Old security issues in code this PR didn't touch +2. 
**Unrelated security improvements** - Don't suggest hardening untouched code + +**Key distinction:** +- โœ… "Your new endpoint lacks rate limiting" - GOOD (new code) +- โœ… "This change bypasses the auth check in `middleware.ts`" - GOOD (impact analysis) +- โŒ "The old `legacy_auth.ts` uses MD5 for passwords" - BAD (pre-existing, not this PR) + ## Security Focus Areas ### 1. Injection Vulnerabilities @@ -57,6 +74,21 @@ Perform a thorough security review of the provided code changes, focusing ONLY o - If you're unsure, don't report it - Prefer false negatives over false positives +### Verify Before Claiming "Missing" Protections + +When your finding claims protection is **missing** (no validation, no sanitization, no auth check): + +**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?" + +- Check if validation/sanitization exists elsewhere (middleware, caller, framework) +- Read the **complete function**, not just the flagged line +- Look for comments explaining why something appears unprotected + +**Your evidence must prove absence โ€” not just that you didn't see it.** + +โŒ **Weak**: "User input is used without validation" +โœ… **Strong**: "I checked the complete request flow. Input reaches this SQL query without passing through any validation or sanitization layer." + ### Severity Classification (All block merge except LOW) - **CRITICAL** (Blocker): Exploitable vulnerability leading to data breach, RCE, or system compromise - Example: SQL injection, hardcoded admin password diff --git a/apps/backend/prompts/qa_fixer.md b/apps/backend/prompts/qa_fixer.md index 8507756946..fe5c018025 100644 --- a/apps/backend/prompts/qa_fixer.md +++ b/apps/backend/prompts/qa_fixer.md @@ -80,6 +80,68 @@ lsof -iTCP -sTCP:LISTEN | grep -E "node|python|next|vite" --- +## ๐Ÿšจ CRITICAL: PATH CONFUSION PREVENTION ๐Ÿšจ + +**THE #1 BUG IN MONOREPOS: Doubled paths after `cd` commands** + +### The Problem + +After running `cd ./apps/frontend`, your current directory changes. If you then use paths like `apps/frontend/src/file.ts`, you're creating **doubled paths** like `apps/frontend/apps/frontend/src/file.ts`. + +### The Solution: ALWAYS CHECK YOUR CWD + +**BEFORE every git command or file operation:** + +```bash +# Step 1: Check where you are +pwd + +# Step 2: Use paths RELATIVE TO CURRENT DIRECTORY +# If pwd shows: /path/to/project/apps/frontend +# Then use: git add src/file.ts +# NOT: git add apps/frontend/src/file.ts +``` + +### Examples + +**โŒ WRONG - Path gets doubled:** +```bash +cd ./apps/frontend +git add apps/frontend/src/file.ts # Looks for apps/frontend/apps/frontend/src/file.ts +``` + +**โœ… CORRECT - Use relative path from current directory:** +```bash +cd ./apps/frontend +pwd # Shows: /path/to/project/apps/frontend +git add src/file.ts # Correctly adds apps/frontend/src/file.ts from project root +``` + +**โœ… ALSO CORRECT - Stay at root, use full relative path:** +```bash +# Don't change directory at all +git add ./apps/frontend/src/file.ts # Works from project root +``` + +### Mandatory Pre-Command Check + +**Before EVERY git add, git commit, or file operation in a monorepo:** + +```bash +# 1. Where am I? +pwd + +# 2. What files am I targeting? +ls -la [target-path] # Verify the path exists + +# 3. Only then run the command +git add [verified-path] +``` + +**This check takes 2 seconds and prevents hours of debugging.** + +--- + ## PHASE 3: FIX ISSUES ONE BY ONE For each issue in the fix request: @@ -166,8 +228,45 @@ If any issue is not fixed, go back to Phase 3. 
## PHASE 6: COMMIT FIXES +### Path Verification (MANDATORY FIRST STEP) + +**๐Ÿšจ BEFORE running ANY git commands, verify your current directory:** + ```bash -git add . +# Step 1: Where am I? +pwd + +# Step 2: What files do I want to commit? +# If you changed to a subdirectory (e.g., cd apps/frontend), +# you need to use paths RELATIVE TO THAT DIRECTORY, not from project root + +# Step 3: Verify paths exist +ls -la [path-to-files] # Make sure the path is correct from your current location + +# Example in a monorepo: +# If pwd shows: /project/apps/frontend +# Then use: git add src/file.ts +# NOT: git add apps/frontend/src/file.ts (this would look for apps/frontend/apps/frontend/src/file.ts) +``` + +**CRITICAL RULE:** If you're in a subdirectory, either: +- **Option A:** Return to project root: `cd [back to working directory]` +- **Option B:** Use paths relative to your CURRENT directory (check with `pwd`) + +### Create the Commit + +```bash +# FIRST: Make sure you're in the working directory root +pwd # Should match your working directory + +# Add all files EXCEPT .auto-claude directory (spec files should never be committed) +git add . ':!.auto-claude' + +# If git add fails with "pathspec did not match", you have a path problem: +# 1. Run pwd to see where you are +# 2. Run git status to see what git sees +# 3. Adjust your paths accordingly + git commit -m "fix: Address QA issues (qa-requested) Fixes: @@ -182,6 +281,8 @@ Verified: QA Fix Session: [N]" ``` +**CRITICAL**: The `:!.auto-claude` pathspec exclusion ensures spec files are NEVER committed. + **NOTE**: Do NOT push to remote. All work stays local until user reviews and approves. --- @@ -304,6 +405,13 @@ npx prisma migrate dev --name [name] - How you verified - Commit messages +### Git Configuration - NEVER MODIFY +**CRITICAL**: You MUST NOT modify git user configuration. Never run: +- `git config user.name` +- `git config user.email` + +The repository inherits the user's configured git identity. Do NOT set test users. + --- ## QA LOOP BEHAVIOR diff --git a/apps/backend/prompts/qa_reviewer.md b/apps/backend/prompts/qa_reviewer.md index d986a41b6e..ff52320a6b 100644 --- a/apps/backend/prompts/qa_reviewer.md +++ b/apps/backend/prompts/qa_reviewer.md @@ -35,8 +35,8 @@ cat project_index.json # 4. Check build progress cat build-progress.txt -# 5. See what files were changed -git diff main --name-only +# 5. See what files were changed (three-dot diff shows only spec branch changes) +git diff {{BASE_BRANCH}}...HEAD --name-status # 6. Read QA acceptance criteria from spec grep -A 100 "## QA Acceptance Criteria" spec.md @@ -514,7 +514,7 @@ All acceptance criteria verified: The implementation is production-ready. Sign-off recorded in implementation_plan.json. -Ready for merge to main. +Ready for merge to {{BASE_BRANCH}}. ``` ### If Rejected: diff --git a/apps/backend/prompts_pkg/prompt_generator.py b/apps/backend/prompts_pkg/prompt_generator.py index 15d2bc9b09..ebd9148854 100644 --- a/apps/backend/prompts_pkg/prompt_generator.py +++ b/apps/backend/prompts_pkg/prompt_generator.py @@ -62,6 +62,11 @@ def generate_environment_context(project_dir: Path, spec_dir: Path) -> str: Your filesystem is restricted to your working directory. All file paths should be relative to this location. Do NOT use absolute paths. +**โš ๏ธ CRITICAL:** Before ANY git command or file operation, run `pwd` to verify your current +directory. If you've used `cd` to change directories, you MUST use paths relative to your +NEW location, not the working directory. 
See the PATH CONFUSION PREVENTION section in the +coder prompt for detailed examples. + **Important Files:** - Spec: `{relative_spec}/spec.md` - Plan: `{relative_spec}/implementation_plan.json` diff --git a/apps/backend/prompts_pkg/prompts.py b/apps/backend/prompts_pkg/prompts.py index acb29d7332..83a8726926 100644 --- a/apps/backend/prompts_pkg/prompts.py +++ b/apps/backend/prompts_pkg/prompts.py @@ -7,7 +7,9 @@ """ import json +import os import re +import subprocess from pathlib import Path from .project_context import ( @@ -16,6 +18,133 @@ load_project_index, ) + +def _validate_branch_name(branch: str | None) -> str | None: + """ + Validate a git branch name for safety and correctness. + + Args: + branch: The branch name to validate + + Returns: + The validated branch name, or None if invalid + """ + if not branch or not isinstance(branch, str): + return None + + # Trim whitespace + branch = branch.strip() + + # Reject empty or whitespace-only strings + if not branch: + return None + + # Enforce maximum length (git refs can be long, but 255 is reasonable) + if len(branch) > 255: + return None + + # Require at least one alphanumeric character + if not any(c.isalnum() for c in branch): + return None + + # Only allow common git-ref characters: letters, numbers, ., _, -, / + # This prevents prompt injection and other security issues + if not re.match(r"^[A-Za-z0-9._/-]+$", branch): + return None + + # Reject suspicious patterns that could be prompt injection attempts + # (newlines, control characters are already blocked by the regex above) + + return branch + + +def _get_base_branch_from_metadata(spec_dir: Path) -> str | None: + """ + Read baseBranch from task_metadata.json if it exists. + + Args: + spec_dir: Directory containing the spec files + + Returns: + The baseBranch from metadata, or None if not found or invalid + """ + metadata_path = spec_dir / "task_metadata.json" + if metadata_path.exists(): + try: + with open(metadata_path, encoding="utf-8") as f: + metadata = json.load(f) + base_branch = metadata.get("baseBranch") + # Validate the branch name before returning + return _validate_branch_name(base_branch) + except (json.JSONDecodeError, OSError): + pass + return None + + +def _detect_base_branch(spec_dir: Path, project_dir: Path) -> str: + """ + Detect the base branch for a project/task. + + Priority order: + 1. baseBranch from task_metadata.json (task-level override) + 2. DEFAULT_BRANCH environment variable + 3. Auto-detect main/master/develop (if they exist in git) + 4. Fall back to "main" + + Args: + spec_dir: Directory containing the spec files + project_dir: Project root directory + + Returns: + The detected base branch name + """ + # 1. Check task_metadata.json for task-specific baseBranch + metadata_branch = _get_base_branch_from_metadata(spec_dir) + if metadata_branch: + return metadata_branch + + # 2. Check for DEFAULT_BRANCH env var + env_branch = _validate_branch_name(os.getenv("DEFAULT_BRANCH")) + if env_branch: + # Verify the branch exists (with timeout to prevent hanging) + try: + result = subprocess.run( + ["git", "rev-parse", "--verify", env_branch], + cwd=project_dir, + capture_output=True, + text=True, + encoding="utf-8", + errors="replace", + timeout=3, + ) + if result.returncode == 0: + return env_branch + except subprocess.TimeoutExpired: + # Treat timeout as branch verification failure + pass + + # 3. 
Auto-detect main/master/develop + for branch in ["main", "master", "develop"]: + try: + result = subprocess.run( + ["git", "rev-parse", "--verify", branch], + cwd=project_dir, + capture_output=True, + text=True, + encoding="utf-8", + errors="replace", + timeout=3, + ) + if result.returncode == 0: + return branch + except subprocess.TimeoutExpired: + # Treat timeout as branch verification failure, try next branch + continue + + # 4. Fall back to "main" + return "main" + + # Directory containing prompt files # prompts/ is a sibling directory of prompts_pkg/, so go up one level first PROMPTS_DIR = Path(__file__).parent.parent / "prompts" @@ -304,6 +433,7 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str: 1. Loads the base QA reviewer prompt 2. Detects project capabilities from project_index.json 3. Injects only relevant MCP tool documentation (Electron, Puppeteer, DB, API) + 4. Detects and injects the correct base branch for git comparisons This saves context window by excluding irrelevant tool docs. For example, a CLI Python project won't get Electron validation docs. @@ -315,9 +445,15 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str: Returns: The QA reviewer prompt with project-specific tools injected """ + # Detect the base branch for this task (from task_metadata.json or auto-detect) + base_branch = _detect_base_branch(spec_dir, project_dir) + # Load base QA reviewer prompt base_prompt = _load_prompt_file("qa_reviewer.md") + # Replace {{BASE_BRANCH}} placeholder with the actual base branch + base_prompt = base_prompt.replace("{{BASE_BRANCH}}", base_branch) + # Load project index and detect capabilities project_index = load_project_index(project_dir) capabilities = detect_project_capabilities(project_index) @@ -347,6 +483,17 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str: The project root is: `{project_dir}` +## GIT BRANCH CONFIGURATION + +**Base branch for comparison:** `{base_branch}` + +When checking for unrelated changes, use three-dot diff syntax: +```bash +git diff {base_branch}...HEAD --name-status +``` + +This shows only changes made in the spec branch since it diverged from `{base_branch}`. + --- ## PROJECT CAPABILITIES DETECTED diff --git a/apps/backend/qa/loop.py b/apps/backend/qa/loop.py index ff8308695e..fcbc1c7f34 100644 --- a/apps/backend/qa/loop.py +++ b/apps/backend/qa/loop.py @@ -6,6 +6,7 @@ approval or max iterations. 
""" +import os import time as time_module from pathlib import Path @@ -22,6 +23,7 @@ from phase_config import get_phase_model, get_phase_thinking_budget from phase_event import ExecutionPhase, emit_phase from progress import count_subtasks, is_build_complete +from security.constants import PROJECT_DIR_ENV_VAR from task_logger import ( LogPhase, get_task_logger, @@ -83,6 +85,10 @@ async def run_qa_validation_loop( Returns: True if QA approved, False otherwise """ + # Set environment variable for security hooks to find the correct project directory + # This is needed because os.getcwd() may return the wrong directory in worktree mode + os.environ[PROJECT_DIR_ENV_VAR] = str(project_dir.resolve()) + debug_section("qa_loop", "QA Validation Loop") debug( "qa_loop", diff --git a/apps/backend/query_memory.py b/apps/backend/query_memory.py index c16f82d943..e729e892bd 100644 --- a/apps/backend/query_memory.py +++ b/apps/backend/query_memory.py @@ -185,24 +185,31 @@ def cmd_get_memories(args): """ result = conn.execute(query, parameters={"limit": limit}) - df = result.get_as_df() + # Process results without pandas (iterate through result set directly) memories = [] - for _, row in df.iterrows(): + while result.has_next(): + row = result.get_next() + # Row order: uuid, name, created_at, content, description, group_id + uuid_val = serialize_value(row[0]) if len(row) > 0 else None + name_val = serialize_value(row[1]) if len(row) > 1 else "" + created_at_val = serialize_value(row[2]) if len(row) > 2 else None + content_val = serialize_value(row[3]) if len(row) > 3 else "" + description_val = serialize_value(row[4]) if len(row) > 4 else "" + group_id_val = serialize_value(row[5]) if len(row) > 5 else "" + memory = { - "id": row.get("uuid") or row.get("name", "unknown"), - "name": row.get("name", ""), - "type": infer_episode_type(row.get("name", ""), row.get("content", "")), - "timestamp": row.get("created_at") or datetime.now().isoformat(), - "content": row.get("content") - or row.get("description") - or row.get("name", ""), - "description": row.get("description", ""), - "group_id": row.get("group_id", ""), + "id": uuid_val or name_val or "unknown", + "name": name_val or "", + "type": infer_episode_type(name_val or "", content_val or ""), + "timestamp": created_at_val or datetime.now().isoformat(), + "content": content_val or description_val or name_val or "", + "description": description_val or "", + "group_id": group_id_val or "", } # Extract session number if present - session_num = extract_session_number(row.get("name", "")) + session_num = extract_session_number(name_val or "") if session_num: memory["session_number"] = session_num @@ -251,24 +258,31 @@ def cmd_search(args): result = conn.execute( query, parameters={"search_query": search_query, "limit": limit} ) - df = result.get_as_df() + # Process results without pandas memories = [] - for _, row in df.iterrows(): + while result.has_next(): + row = result.get_next() + # Row order: uuid, name, created_at, content, description, group_id + uuid_val = serialize_value(row[0]) if len(row) > 0 else None + name_val = serialize_value(row[1]) if len(row) > 1 else "" + created_at_val = serialize_value(row[2]) if len(row) > 2 else None + content_val = serialize_value(row[3]) if len(row) > 3 else "" + description_val = serialize_value(row[4]) if len(row) > 4 else "" + group_id_val = serialize_value(row[5]) if len(row) > 5 else "" + memory = { - "id": row.get("uuid") or row.get("name", "unknown"), - "name": row.get("name", ""), - "type": 
infer_episode_type(row.get("name", ""), row.get("content", "")), - "timestamp": row.get("created_at") or datetime.now().isoformat(), - "content": row.get("content") - or row.get("description") - or row.get("name", ""), - "description": row.get("description", ""), - "group_id": row.get("group_id", ""), + "id": uuid_val or name_val or "unknown", + "name": name_val or "", + "type": infer_episode_type(name_val or "", content_val or ""), + "timestamp": created_at_val or datetime.now().isoformat(), + "content": content_val or description_val or name_val or "", + "description": description_val or "", + "group_id": group_id_val or "", "score": 1.0, # Keyword match score } - session_num = extract_session_number(row.get("name", "")) + session_num = extract_session_number(name_val or "") if session_num: memory["session_number"] = session_num @@ -461,19 +475,26 @@ def cmd_get_entities(args): """ result = conn.execute(query, parameters={"limit": limit}) - df = result.get_as_df() + # Process results without pandas entities = [] - for _, row in df.iterrows(): - if not row.get("summary"): + while result.has_next(): + row = result.get_next() + # Row order: uuid, name, summary, created_at + uuid_val = serialize_value(row[0]) if len(row) > 0 else None + name_val = serialize_value(row[1]) if len(row) > 1 else "" + summary_val = serialize_value(row[2]) if len(row) > 2 else "" + created_at_val = serialize_value(row[3]) if len(row) > 3 else None + + if not summary_val: continue entity = { - "id": row.get("uuid") or row.get("name", "unknown"), - "name": row.get("name", ""), - "type": infer_entity_type(row.get("name", "")), - "timestamp": row.get("created_at") or datetime.now().isoformat(), - "content": row.get("summary", ""), + "id": uuid_val or name_val or "unknown", + "name": name_val or "", + "type": infer_entity_type(name_val or ""), + "timestamp": created_at_val or datetime.now().isoformat(), + "content": summary_val or "", } entities.append(entity) @@ -488,6 +509,118 @@ def cmd_get_entities(args): output_error(f"Query failed: {e}") +def cmd_add_episode(args): + """ + Add a new episode to the memory database. + + This is called from the Electron main process to save PR review insights, + patterns, gotchas, and other memories directly to the LadybugDB database. 
+ + Args: + args.db_path: Path to database directory + args.database: Database name + args.name: Episode name/title + args.content: Episode content (JSON string) + args.episode_type: Type of episode (session_insight, pattern, gotcha, task_outcome, pr_review) + args.group_id: Optional group ID for namespacing + """ + if not apply_monkeypatch(): + output_error("Neither kuzu nor LadybugDB is installed") + return + + try: + import uuid as uuid_module + + try: + import kuzu + except ImportError: + import real_ladybug as kuzu + + # Parse content from JSON if provided + content = args.content + if content: + try: + # Try to parse as JSON to validate + parsed = json.loads(content) + # Re-serialize to ensure consistent formatting + content = json.dumps(parsed) + except json.JSONDecodeError: + # If not valid JSON, use as-is + pass + + # Generate unique ID + episode_uuid = str(uuid_module.uuid4()) + created_at = datetime.now().isoformat() + + # Get database path - create directory if needed + full_path = Path(args.db_path) / args.database + if not full_path.exists(): + # For new databases, create the parent directory + Path(args.db_path).mkdir(parents=True, exist_ok=True) + + # Open database (creates it if it doesn't exist) + db = kuzu.Database(str(full_path)) + conn = kuzu.Connection(db) + + # Always try to create the Episodic table if it doesn't exist + # This handles both new databases and existing databases without the table + try: + conn.execute(""" + CREATE NODE TABLE IF NOT EXISTS Episodic ( + uuid STRING PRIMARY KEY, + name STRING, + content STRING, + source_description STRING, + group_id STRING, + created_at STRING + ) + """) + except Exception as schema_err: + # Table might already exist with different schema - that's ok + # The insert will fail if schema is incompatible + sys.stderr.write(f"Schema creation note: {schema_err}\n") + + # Insert the episode + try: + insert_query = """ + CREATE (e:Episodic { + uuid: $uuid, + name: $name, + content: $content, + source_description: $description, + group_id: $group_id, + created_at: $created_at + }) + """ + conn.execute( + insert_query, + parameters={ + "uuid": episode_uuid, + "name": args.name, + "content": content, + "description": f"[{args.episode_type}] {args.name}", + "group_id": args.group_id or "", + "created_at": created_at, + }, + ) + + output_json( + True, + data={ + "id": episode_uuid, + "name": args.name, + "type": args.episode_type, + "timestamp": created_at, + }, + ) + + except Exception as e: + output_error(f"Failed to insert episode: {e}") + + except Exception as e: + output_error(f"Failed to add episode: {e}") + + def infer_episode_type(name: str, content: str = "") -> str: """Infer the episode type from its name and content.""" name_lower = (name or "").lower() @@ -580,6 +713,27 @@ def main(): "--limit", type=int, default=20, help="Maximum results" ) + # add-episode command (for saving memories from Electron app) + add_parser = subparsers.add_parser( + "add-episode", + help="Add an episode to the memory database (called from Electron)", + ) + add_parser.add_argument("db_path", help="Path to database directory") + add_parser.add_argument("database", help="Database name") + add_parser.add_argument("--name", required=True, help="Episode name/title") + add_parser.add_argument( + "--content", required=True, help="Episode content (JSON string)" + ) + add_parser.add_argument( + "--type", + dest="episode_type", + default="session_insight", + help="Episode type (session_insight, pattern, gotcha, task_outcome, pr_review)", + ) + 
add_parser.add_argument( + "--group-id", dest="group_id", help="Optional group ID for namespacing" + ) + args = parser.parse_args() if not args.command: @@ -594,6 +748,7 @@ def main(): "search": cmd_search, "semantic-search": cmd_semantic_search, "get-entities": cmd_get_entities, + "add-episode": cmd_add_episode, } handler = commands.get(args.command) diff --git a/apps/backend/requirements.txt b/apps/backend/requirements.txt index 59aec7b0ee..95c8a1eacb 100644 --- a/apps/backend/requirements.txt +++ b/apps/backend/requirements.txt @@ -10,6 +10,10 @@ tomli>=2.0.0; python_version < "3.11" real_ladybug>=0.13.0; python_version >= "3.12" graphiti-core>=0.5.0; python_version >= "3.12" +# Windows-specific dependency for LadybugDB/Graphiti +# pywin32 provides Windows system bindings required by real_ladybug +pywin32>=306; sys_platform == "win32" and python_version >= "3.12" + # Google AI (optional - for Gemini LLM and embeddings) google-generativeai>=0.8.0 diff --git a/apps/backend/runners/ai_analyzer/claude_client.py b/apps/backend/runners/ai_analyzer/claude_client.py index e1f5a669dc..5d3f07121a 100644 --- a/apps/backend/runners/ai_analyzer/claude_client.py +++ b/apps/backend/runners/ai_analyzer/claude_client.py @@ -8,6 +8,7 @@ try: from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient + from phase_config import resolve_model_id CLAUDE_SDK_AVAILABLE = True except ImportError: @@ -17,7 +18,7 @@ class ClaudeAnalysisClient: """Wrapper for Claude SDK client with analysis-specific configuration.""" - DEFAULT_MODEL = "claude-sonnet-4-5-20250929" + DEFAULT_MODEL = "sonnet" # Shorthand - resolved via API Profile if configured ALLOWED_TOOLS = ["Read", "Glob", "Grep"] MAX_TURNS = 50 @@ -110,7 +111,7 @@ def _create_client(self, settings_file: Path) -> Any: return ClaudeSDKClient( options=ClaudeAgentOptions( - model=self.DEFAULT_MODEL, + model=resolve_model_id(self.DEFAULT_MODEL), # Resolve via API Profile system_prompt=system_prompt, allowed_tools=self.ALLOWED_TOOLS, max_turns=self.MAX_TURNS, diff --git a/apps/backend/runners/github/cleanup_pr_worktrees.py b/apps/backend/runners/github/cleanup_pr_worktrees.py new file mode 100755 index 0000000000..1a40688f9f --- /dev/null +++ b/apps/backend/runners/github/cleanup_pr_worktrees.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 +""" +PR Worktree Cleanup Utility +============================ + +Command-line tool for managing PR review worktrees. 
+ +Usage: + python cleanup_pr_worktrees.py --list # List all worktrees + python cleanup_pr_worktrees.py --cleanup # Run cleanup policies + python cleanup_pr_worktrees.py --cleanup-all # Remove ALL worktrees + python cleanup_pr_worktrees.py --stats # Show cleanup statistics +""" + +import argparse + +# Load module directly to avoid import issues +import importlib.util +import sys +from pathlib import Path + +services_dir = Path(__file__).parent / "services" +module_path = services_dir / "pr_worktree_manager.py" + +spec = importlib.util.spec_from_file_location("pr_worktree_manager", module_path) +pr_worktree_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(pr_worktree_module) + +PRWorktreeManager = pr_worktree_module.PRWorktreeManager +DEFAULT_PR_WORKTREE_MAX_AGE_DAYS = pr_worktree_module.DEFAULT_PR_WORKTREE_MAX_AGE_DAYS +DEFAULT_MAX_PR_WORKTREES = pr_worktree_module.DEFAULT_MAX_PR_WORKTREES +_get_max_age_days = pr_worktree_module._get_max_age_days +_get_max_pr_worktrees = pr_worktree_module._get_max_pr_worktrees + + +def find_project_root() -> Path: + """Find the git project root directory.""" + current = Path.cwd() + while current != current.parent: + if (current / ".git").exists(): + return current + current = current.parent + raise RuntimeError("Not in a git repository") + + +def list_worktrees(manager: PRWorktreeManager) -> None: + """List all PR review worktrees.""" + worktrees = manager.get_worktree_info() + + if not worktrees: + print("No PR review worktrees found.") + return + + print(f"\nFound {len(worktrees)} PR review worktrees:\n") + print(f"{'Directory':<40} {'Age (days)':<12} {'PR':<6}") + print("-" * 60) + + for wt in worktrees: + pr_str = f"#{wt.pr_number}" if wt.pr_number else "N/A" + print(f"{wt.path.name:<40} {wt.age_days:>10.1f} {pr_str:>6}") + + print() + + +def show_stats(manager: PRWorktreeManager) -> None: + """Show worktree cleanup statistics.""" + worktrees = manager.get_worktree_info() + registered = manager.get_registered_worktrees() + # Use resolved paths for consistent comparison (handles macOS symlinks) + registered_resolved = {p.resolve() for p in registered} + + # Get current policy values (may be overridden by env vars) + max_age_days = _get_max_age_days() + max_worktrees = _get_max_pr_worktrees() + + total = len(worktrees) + orphaned = sum( + 1 for wt in worktrees if wt.path.resolve() not in registered_resolved + ) + expired = sum(1 for wt in worktrees if wt.age_days > max_age_days) + excess = max(0, total - max_worktrees) + + print("\nPR Worktree Statistics:") + print(f" Total worktrees: {total}") + print(f" Registered with git: {len(registered)}") + print(f" Orphaned (not in git): {orphaned}") + print(f" Expired (>{max_age_days} days): {expired}") + print(f" Excess (>{max_worktrees} limit): {excess}") + print() + print("Cleanup Policies:") + print(f" Max age: {max_age_days} days") + print(f" Max count: {max_worktrees} worktrees") + print() + + +def cleanup_worktrees(manager: PRWorktreeManager, force: bool = False) -> None: + """Run cleanup policies on worktrees.""" + print("\nRunning PR worktree cleanup...") + if force: + print("WARNING: Force cleanup - removing ALL worktrees!") + count = manager.cleanup_all_worktrees() + print(f"Removed {count} worktrees.") + else: + stats = manager.cleanup_worktrees() + if stats["total"] == 0: + print("No worktrees needed cleanup.") + else: + print("\nCleanup complete:") + print(f" Orphaned removed: {stats['orphaned']}") + print(f" Expired removed: {stats['expired']}") + print(f" Excess removed: 
{stats['excess']}") + print(f" Total removed: {stats['total']}") + print() + + +def main(): + parser = argparse.ArgumentParser( + description="Manage PR review worktrees", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python cleanup_pr_worktrees.py --list + python cleanup_pr_worktrees.py --cleanup + python cleanup_pr_worktrees.py --stats + python cleanup_pr_worktrees.py --cleanup-all + +Environment variables: + MAX_PR_WORKTREES=10 # Max number of worktrees to keep + PR_WORKTREE_MAX_AGE_DAYS=7 # Max age in days before cleanup + """, + ) + + parser.add_argument( + "--list", action="store_true", help="List all PR review worktrees" + ) + + parser.add_argument( + "--cleanup", + action="store_true", + help="Run cleanup policies (remove orphaned, expired, and excess worktrees)", + ) + + parser.add_argument( + "--cleanup-all", + action="store_true", + help="Remove ALL PR review worktrees (dangerous!)", + ) + + parser.add_argument("--stats", action="store_true", help="Show cleanup statistics") + + parser.add_argument( + "--project-dir", + type=Path, + help="Project directory (default: auto-detect git root)", + ) + + args = parser.parse_args() + + # Require at least one action + if not any([args.list, args.cleanup, args.cleanup_all, args.stats]): + parser.print_help() + return 1 + + try: + # Find project directory + if args.project_dir: + project_dir = args.project_dir + else: + project_dir = find_project_root() + + print(f"Project directory: {project_dir}") + + # Create manager + manager = PRWorktreeManager( + project_dir=project_dir, worktree_dir=".auto-claude/github/pr/worktrees" + ) + + # Execute actions + if args.stats: + show_stats(manager) + + if args.list: + list_worktrees(manager) + + if args.cleanup: + cleanup_worktrees(manager, force=False) + + if args.cleanup_all: + response = input( + "This will remove ALL PR worktrees. Are you sure? (yes/no): " + ) + if response.lower() == "yes": + cleanup_worktrees(manager, force=True) + else: + print("Aborted.") + + return 0 + + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/apps/backend/runners/github/confidence.py b/apps/backend/runners/github/confidence.py index 0e21b211eb..70557b922c 100644 --- a/apps/backend/runners/github/confidence.py +++ b/apps/backend/runners/github/confidence.py @@ -1,16 +1,18 @@ """ -Review Confidence Scoring -========================= +DEPRECATED: Review Confidence Scoring +===================================== -Adds confidence scores to review findings to help users prioritize. +This module is DEPRECATED and will be removed in a future version. -Features: -- Confidence scoring based on pattern matching, historical accuracy -- Risk assessment (false positive likelihood) -- Evidence tracking for transparency -- Calibration based on outcome tracking +The confidence scoring approach has been replaced with EVIDENCE-BASED VALIDATION: +- Instead of assigning confidence scores (0-100), findings now require concrete + code evidence proving the issue exists. +- Simple rule: If you can't show the actual problematic code, don't report it. +- Validation is binary: either the evidence exists in the file or it doesn't. -Usage: +For new code, use evidence-based validation in pydantic_models.py and models.py instead. 
+ +Legacy Usage (deprecated): scorer = ConfidenceScorer(learning_tracker=tracker) # Score a finding @@ -20,10 +22,24 @@ # Get explanation print(scorer.explain_confidence(scored)) + +Migration: + - Instead of `confidence: float`, use `evidence: str` with actual code snippets + - Instead of filtering by confidence threshold, verify evidence exists in file + - See pr_finding_validator.md for the new evidence-based approach """ from __future__ import annotations +import warnings + +warnings.warn( + "The confidence module is deprecated. Use evidence-based validation instead. " + "See models.py 'evidence' field and pr_finding_validator.md for the new approach.", + DeprecationWarning, + stacklevel=2, +) + from dataclasses import dataclass, field from enum import Enum from typing import Any diff --git a/apps/backend/runners/github/context_gatherer.py b/apps/backend/runners/github/context_gatherer.py index 0ce48bf5ea..9a3c551261 100644 --- a/apps/backend/runners/github/context_gatherer.py +++ b/apps/backend/runners/github/context_gatherer.py @@ -204,6 +204,11 @@ class PRContext: # Commit SHAs for worktree creation (PR review isolation) head_sha: str = "" # Commit SHA of PR head (headRefOid) base_sha: str = "" # Commit SHA of PR base (baseRefOid) + # Merge conflict status + has_merge_conflicts: bool = False # True if PR has conflicts with base branch + merge_state_status: str = ( + "" # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE + ) class PRContextGatherer: @@ -276,6 +281,17 @@ async def gather(self) -> PRContext: # Check if diff was truncated (empty diff but files were changed) diff_truncated = len(diff) == 0 and len(changed_files) > 0 + # Check merge conflict status + mergeable = pr_data.get("mergeable", "UNKNOWN") + merge_state_status = pr_data.get("mergeStateStatus", "UNKNOWN") + has_merge_conflicts = mergeable == "CONFLICTING" + + if has_merge_conflicts: + print( + f"[Context] โš ๏ธ PR has merge conflicts (mergeStateStatus: {merge_state_status})", + flush=True, + ) + return PRContext( pr_number=self.pr_number, title=pr_data["title"], @@ -296,6 +312,8 @@ async def gather(self) -> PRContext: diff_truncated=diff_truncated, head_sha=pr_data.get("headRefOid", ""), base_sha=pr_data.get("baseRefOid", ""), + has_merge_conflicts=has_merge_conflicts, + merge_state_status=merge_state_status, ) async def _fetch_pr_metadata(self) -> dict: @@ -317,6 +335,8 @@ async def _fetch_pr_metadata(self) -> dict: "deletions", "changedFiles", "labels", + "mergeable", # MERGEABLE, CONFLICTING, or UNKNOWN + "mergeStateStatus", # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE ], ) @@ -1036,28 +1056,56 @@ async def gather(self) -> FollowupReviewContext: f"[Followup] Comparing {previous_sha[:8]}...{current_sha[:8]}", flush=True ) - # Get commit comparison + # Get PR-scoped files and commits (excludes merge-introduced changes) + # This solves the problem where merging develop into a feature branch + # would include commits from other PRs in the follow-up review. 
+ # Pass reviewed_file_blobs for rebase-resistant comparison + reviewed_file_blobs = getattr(self.previous_review, "reviewed_file_blobs", {}) try: - comparison = await self.gh_client.compare_commits(previous_sha, current_sha) - except Exception as e: - print(f"[Followup] Error comparing commits: {e}", flush=True) - return FollowupReviewContext( - pr_number=self.pr_number, - previous_review=self.previous_review, - previous_commit_sha=previous_sha, - current_commit_sha=current_sha, - error=f"Failed to compare commits: {e}", + pr_files, new_commits = await self.gh_client.get_pr_files_changed_since( + self.pr_number, previous_sha, reviewed_file_blobs=reviewed_file_blobs ) + print( + f"[Followup] PR has {len(pr_files)} files, " + f"{len(new_commits)} commits since last review" + + (" (blob comparison used)" if reviewed_file_blobs else ""), + flush=True, + ) + except Exception as e: + print(f"[Followup] Error getting PR files/commits: {e}", flush=True) + # Fallback to compare_commits if PR endpoints fail + print("[Followup] Falling back to commit comparison...", flush=True) + try: + comparison = await self.gh_client.compare_commits( + previous_sha, current_sha + ) + new_commits = comparison.get("commits", []) + pr_files = comparison.get("files", []) + print( + f"[Followup] Fallback: Found {len(new_commits)} commits, " + f"{len(pr_files)} files (may include merge-introduced changes)", + flush=True, + ) + except Exception as e2: + print(f"[Followup] Fallback also failed: {e2}", flush=True) + return FollowupReviewContext( + pr_number=self.pr_number, + previous_review=self.previous_review, + previous_commit_sha=previous_sha, + current_commit_sha=current_sha, + error=f"Failed to get PR context: {e}, fallback: {e2}", + ) - # Extract data from comparison - commits = comparison.get("commits", []) - files = comparison.get("files", []) + # Use PR files as the canonical list (excludes files from merged branches) + commits = new_commits + files = pr_files print( f"[Followup] Found {len(commits)} new commits, {len(files)} changed files", flush=True, ) # Build diff from file patches + # Note: PR files endpoint returns 'filename' key, compare returns 'filename' too diff_parts = [] files_changed = [] for file_info in files: @@ -1139,6 +1187,26 @@ async def gather(self) -> FollowupReviewContext: flush=True, ) + # Fetch current merge conflict status + has_merge_conflicts = False + merge_state_status = "UNKNOWN" + try: + pr_status = await self.gh_client.pr_get( + self.pr_number, + json_fields=["mergeable", "mergeStateStatus"], + ) + mergeable = pr_status.get("mergeable", "UNKNOWN") + merge_state_status = pr_status.get("mergeStateStatus", "UNKNOWN") + has_merge_conflicts = mergeable == "CONFLICTING" + + if has_merge_conflicts: + print( + f"[Followup] โš ๏ธ PR has merge conflicts (mergeStateStatus: {merge_state_status})", + flush=True, + ) + except Exception as e: + print(f"[Followup] Could not fetch merge status: {e}", flush=True) + return FollowupReviewContext( pr_number=self.pr_number, previous_review=self.previous_review, @@ -1151,4 +1219,6 @@ async def gather(self) -> FollowupReviewContext: + contributor_reviews, ai_bot_comments_since_review=ai_comments, pr_reviews_since_review=pr_reviews, + has_merge_conflicts=has_merge_conflicts, + merge_state_status=merge_state_status, ) diff --git a/apps/backend/runners/github/gh_client.py b/apps/backend/runners/github/gh_client.py index 942aefa2b4..4ade5f913b 100644 --- a/apps/backend/runners/github/gh_client.py +++ b/apps/backend/runners/github/gh_client.py @@ -822,14 
+822,17 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]: Returns: Dict with: - - checks: List of check runs with name, status, conclusion + - checks: List of check runs with name, state - passing: Number of passing checks - failing: Number of failing checks - pending: Number of pending checks - failed_checks: List of failed check names """ try: - args = ["pr", "checks", str(pr_number), "--json", "name,state,conclusion"] + # Note: gh pr checks --json only supports: bucket, completedAt, description, + # event, link, name, startedAt, state, workflow + # The 'state' field directly contains the result (SUCCESS, FAILURE, PENDING, etc.) + args = ["pr", "checks", str(pr_number), "--json", "name,state"] args = self._add_repo_flag(args) result = await self.run(args, timeout=30.0) @@ -842,15 +845,14 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]: for check in checks: state = check.get("state", "").upper() - conclusion = check.get("conclusion", "").upper() name = check.get("name", "Unknown") - if state == "COMPLETED": - if conclusion in ("SUCCESS", "NEUTRAL", "SKIPPED"): - passing += 1 - elif conclusion in ("FAILURE", "TIMED_OUT", "CANCELLED"): - failing += 1 - failed_checks.append(name) + # gh pr checks 'state' directly contains: SUCCESS, FAILURE, PENDING, NEUTRAL, etc. + if state in ("SUCCESS", "NEUTRAL", "SKIPPED"): + passing += 1 + elif state in ("FAILURE", "TIMED_OUT", "CANCELLED", "STARTUP_FAILURE"): + failing += 1 + failed_checks.append(name) else: # PENDING, QUEUED, IN_PROGRESS, etc. pending += 1 @@ -872,3 +874,336 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]: "failed_checks": [], "error": str(e), } + + async def get_workflows_awaiting_approval(self, pr_number: int) -> dict[str, Any]: + """ + Get workflow runs awaiting approval for a PR from a fork. + + Workflows from forked repositories require manual approval before running. + These are NOT included in `gh pr checks` and must be queried separately. 
+ + Args: + pr_number: PR number + + Returns: + Dict with: + - awaiting_approval: Number of workflows waiting for approval + - workflow_runs: List of workflow runs with id, name, html_url + - can_approve: Whether this token can approve workflows + """ + try: + # First, get the PR's head SHA to filter workflow runs + pr_args = ["pr", "view", str(pr_number), "--json", "headRefOid"] + pr_args = self._add_repo_flag(pr_args) + pr_result = await self.run(pr_args, timeout=30.0) + pr_data = json.loads(pr_result.stdout) if pr_result.stdout.strip() else {} + head_sha = pr_data.get("headRefOid", "") + + if not head_sha: + return { + "awaiting_approval": 0, + "workflow_runs": [], + "can_approve": False, + } + + # Query workflow runs with action_required status + # Note: We need to use the API endpoint as gh CLI doesn't have direct support + endpoint = ( + "repos/{owner}/{repo}/actions/runs?status=action_required&per_page=100" + ) + args = ["api", "--method", "GET", endpoint] + + result = await self.run(args, timeout=30.0) + data = json.loads(result.stdout) if result.stdout.strip() else {} + all_runs = data.get("workflow_runs", []) + + # Filter to only runs for this PR's head SHA + pr_runs = [ + { + "id": run.get("id"), + "name": run.get("name"), + "html_url": run.get("html_url"), + "workflow_name": run.get("workflow", {}).get("name", "Unknown"), + } + for run in all_runs + if run.get("head_sha") == head_sha + ] + + return { + "awaiting_approval": len(pr_runs), + "workflow_runs": pr_runs, + "can_approve": True, # Assume token has permission, will fail if not + } + except (GHCommandError, GHTimeoutError, json.JSONDecodeError) as e: + logger.warning( + f"Failed to get workflows awaiting approval for #{pr_number}: {e}" + ) + return { + "awaiting_approval": 0, + "workflow_runs": [], + "can_approve": False, + "error": str(e), + } + + async def approve_workflow_run(self, run_id: int) -> bool: + """ + Approve a workflow run that's waiting for approval (from a fork). + + Args: + run_id: The workflow run ID to approve + + Returns: + True if approval succeeded, False otherwise + """ + try: + endpoint = f"repos/{{owner}}/{{repo}}/actions/runs/{run_id}/approve" + args = ["api", "--method", "POST", endpoint] + + await self.run(args, timeout=30.0) + logger.info(f"Approved workflow run {run_id}") + return True + except (GHCommandError, GHTimeoutError) as e: + logger.warning(f"Failed to approve workflow run {run_id}: {e}") + return False + + async def get_pr_checks_comprehensive(self, pr_number: int) -> dict[str, Any]: + """ + Get comprehensive CI status including workflows awaiting approval. + + This combines: + - Standard check runs from `gh pr checks` + - Workflows awaiting approval (for fork PRs) + + Args: + pr_number: PR number + + Returns: + Dict with all check information including awaiting_approval count + """ + # Get standard checks + checks = await self.get_pr_checks(pr_number) + + # Get workflows awaiting approval + awaiting = await self.get_workflows_awaiting_approval(pr_number) + + # Merge the results + checks["awaiting_approval"] = awaiting.get("awaiting_approval", 0) + checks["awaiting_workflow_runs"] = awaiting.get("workflow_runs", []) + + # Update pending count to include awaiting approval + checks["pending"] = checks.get("pending", 0) + awaiting.get( + "awaiting_approval", 0 + ) + + return checks + + async def get_pr_files(self, pr_number: int) -> list[dict[str, Any]]: + """ + Get files changed by a PR using the PR files endpoint. 
+ + IMPORTANT: This returns only files that are part of the PR's actual changes, + NOT files that came in from merging another branch (e.g., develop). + This is crucial for follow-up reviews to avoid reviewing code from other PRs. + + Uses: GET /repos/{owner}/{repo}/pulls/{pr_number}/files + + Args: + pr_number: PR number + + Returns: + List of file objects with: + - filename: Path to the file + - status: added, removed, modified, renamed, copied, changed + - additions: Number of lines added + - deletions: Number of lines deleted + - changes: Total number of line changes + - patch: The unified diff patch for this file (may be absent for large files) + """ + files = [] + page = 1 + per_page = 100 + + while True: + endpoint = f"repos/{{owner}}/{{repo}}/pulls/{pr_number}/files?page={page}&per_page={per_page}" + args = ["api", "--method", "GET", endpoint] + + result = await self.run(args, timeout=60.0) + page_files = json.loads(result.stdout) if result.stdout.strip() else [] + + if not page_files: + break + + files.extend(page_files) + + # Check if we got a full page (more pages might exist) + if len(page_files) < per_page: + break + + page += 1 + + # Safety limit to prevent infinite loops + if page > 50: + logger.warning( + f"PR #{pr_number} has more than 5000 files, stopping pagination" + ) + break + + return files + + async def get_pr_commits(self, pr_number: int) -> list[dict[str, Any]]: + """ + Get commits that are part of a PR using the PR commits endpoint. + + IMPORTANT: This returns only commits that are part of the PR's branch, + NOT commits that came in from merging another branch (e.g., develop). + This is crucial for follow-up reviews to avoid reviewing commits from other PRs. + + Uses: GET /repos/{owner}/{repo}/pulls/{pr_number}/commits + + Args: + pr_number: PR number + + Returns: + List of commit objects with: + - sha: Commit SHA + - commit: Object with message, author, committer info + - author: GitHub user who authored the commit + - committer: GitHub user who committed + - parents: List of parent commit SHAs + """ + commits = [] + page = 1 + per_page = 100 + + while True: + endpoint = f"repos/{{owner}}/{{repo}}/pulls/{pr_number}/commits?page={page}&per_page={per_page}" + args = ["api", "--method", "GET", endpoint] + + result = await self.run(args, timeout=60.0) + page_commits = json.loads(result.stdout) if result.stdout.strip() else [] + + if not page_commits: + break + + commits.extend(page_commits) + + # Check if we got a full page (more pages might exist) + if len(page_commits) < per_page: + break + + page += 1 + + # Safety limit + if page > 10: + logger.warning( + f"PR #{pr_number} has more than 1000 commits, stopping pagination" + ) + break + + return commits + + async def get_pr_files_changed_since( + self, + pr_number: int, + base_sha: str, + reviewed_file_blobs: dict[str, str] | None = None, + ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: + """ + Get files and commits that are part of the PR and changed since a specific commit. + + This method solves the "merge introduced commits" problem by: + 1. Getting the canonical list of PR files (excludes files from merged branches) + 2. Getting the canonical list of PR commits (excludes commits from merged branches) + 3. Filtering to only include commits after base_sha + + When a rebase/force-push is detected (base_sha not found in commits), and + reviewed_file_blobs is provided, uses blob SHA comparison to identify which + files actually changed content. This prevents re-reviewing unchanged files. 
+ + Args: + pr_number: PR number + base_sha: The commit SHA to compare from (e.g., last reviewed commit) + reviewed_file_blobs: Optional dict mapping filename -> blob SHA from the + previous review. Used as fallback when base_sha is not found (rebase). + + Returns: + Tuple of: + - List of file objects that are part of the PR (filtered if blob comparison used) + - List of commit objects that are part of the PR and after base_sha. + NOTE: Returns empty list if rebase/force-push detected, since commit SHAs + are rewritten and we cannot determine which commits are truly "new". + """ + # Get PR's canonical files (these are the actual PR changes) + pr_files = await self.get_pr_files(pr_number) + + # Get PR's canonical commits + pr_commits = await self.get_pr_commits(pr_number) + + # Find the position of base_sha in PR commits + # Use minimum 7-char prefix comparison (git's default short SHA length) + base_index = -1 + min_prefix_len = 7 + base_prefix = ( + base_sha[:min_prefix_len] if len(base_sha) >= min_prefix_len else base_sha + ) + for i, commit in enumerate(pr_commits): + commit_prefix = commit["sha"][:min_prefix_len] + if commit_prefix == base_prefix: + base_index = i + break + + # Commits after base_sha (these are the new commits to review) + if base_index >= 0: + new_commits = pr_commits[base_index + 1 :] + return pr_files, new_commits + + # base_sha not found in PR commits - this happens when: + # 1. The base_sha was from a merge commit (not a direct PR commit) + # 2. The PR was rebased/force-pushed + logger.warning( + f"base_sha {base_sha[:8]} not found in PR #{pr_number} commits. " + "PR was likely rebased or force-pushed." + ) + + # If we have blob SHAs from the previous review, use them to filter files + # Blob SHAs persist across rebases - same content = same blob SHA + if reviewed_file_blobs: # Only use blob comparison if we have actual blob data + changed_files = [] + unchanged_count = 0 + for file in pr_files: + filename = file.get("filename", "") + current_blob_sha = file.get("sha", "") + file_status = file.get("status", "") + previous_blob_sha = reviewed_file_blobs.get(filename, "") + + # Always include files that were added, removed, or renamed + # These are significant changes regardless of blob SHA + if file_status in ("added", "removed", "renamed"): + changed_files.append(file) + elif not previous_blob_sha: + # File wasn't in previous review - include it + changed_files.append(file) + elif current_blob_sha != previous_blob_sha: + # File content changed - include it + changed_files.append(file) + else: + # Same blob SHA = same content - skip it + unchanged_count += 1 + + if unchanged_count > 0: + logger.info( + f"Blob comparison: {len(changed_files)} files changed, " + f"{unchanged_count} unchanged (skipped)" + ) + + # Return filtered files but empty commits list (can't determine "new" commits after rebase) + # After a rebase, all commit SHAs are rewritten so we can't identify which are truly new. + # The file changes via blob comparison are the reliable source of what changed. + return changed_files, [] + + # No blob data available - return all files but empty commits (can't determine new commits) + logger.warning( + "No reviewed_file_blobs available for blob comparison after rebase. " + "Returning all PR files with empty commits list." 
+ ) + return pr_files, [] diff --git a/apps/backend/runners/github/models.py b/apps/backend/runners/github/models.py index cb7dbe22e9..0d95eb2a63 100644 --- a/apps/backend/runners/github/models.py +++ b/apps/backend/runners/github/models.py @@ -214,19 +214,18 @@ class PRReviewFinding: end_line: int | None = None suggested_fix: str | None = None fixable: bool = False - # NEW: Support for verification and redundancy detection - confidence: float = 0.85 # AI's confidence in this finding (0.0-1.0) + # Evidence-based validation: actual code proving the issue exists + evidence: str | None = None # Actual code snippet showing the issue verification_note: str | None = ( None # What evidence is missing or couldn't be verified ) redundant_with: str | None = None # Reference to duplicate code (file:line) - # NEW: Finding validation fields (from finding-validator re-investigation) + # Finding validation fields (from finding-validator re-investigation) validation_status: str | None = ( None # confirmed_valid, dismissed_false_positive, needs_human_review ) validation_evidence: str | None = None # Code snippet examined during validation - validation_confidence: float | None = None # Confidence of validation (0.0-1.0) validation_explanation: str | None = None # Why finding was validated/dismissed def to_dict(self) -> dict: @@ -241,14 +240,13 @@ def to_dict(self) -> dict: "end_line": self.end_line, "suggested_fix": self.suggested_fix, "fixable": self.fixable, - # NEW fields - "confidence": self.confidence, + # Evidence-based validation fields + "evidence": self.evidence, "verification_note": self.verification_note, "redundant_with": self.redundant_with, # Validation fields "validation_status": self.validation_status, "validation_evidence": self.validation_evidence, - "validation_confidence": self.validation_confidence, "validation_explanation": self.validation_explanation, } @@ -265,14 +263,13 @@ def from_dict(cls, data: dict) -> PRReviewFinding: end_line=data.get("end_line"), suggested_fix=data.get("suggested_fix"), fixable=data.get("fixable", False), - # NEW fields - confidence=data.get("confidence", 0.85), + # Evidence-based validation fields + evidence=data.get("evidence"), verification_note=data.get("verification_note"), redundant_with=data.get("redundant_with"), # Validation fields validation_status=data.get("validation_status"), validation_evidence=data.get("validation_evidence"), - validation_confidence=data.get("validation_confidence"), validation_explanation=data.get("validation_explanation"), ) @@ -383,6 +380,9 @@ class PRReviewResult: # Follow-up review tracking reviewed_commit_sha: str | None = None # HEAD SHA at time of review + reviewed_file_blobs: dict[str, str] = field( + default_factory=dict + ) # filename โ†’ blob SHA at time of review (survives rebases) is_followup_review: bool = False # True if this is a follow-up review previous_review_id: int | None = None # Reference to the review this follows up on resolved_findings: list[str] = field(default_factory=list) # Finding IDs now fixed @@ -421,6 +421,7 @@ def to_dict(self) -> dict: "quick_scan_summary": self.quick_scan_summary, # Follow-up review fields "reviewed_commit_sha": self.reviewed_commit_sha, + "reviewed_file_blobs": self.reviewed_file_blobs, "is_followup_review": self.is_followup_review, "previous_review_id": self.previous_review_id, "resolved_findings": self.resolved_findings, @@ -465,6 +466,7 @@ def from_dict(cls, data: dict) -> PRReviewResult: quick_scan_summary=data.get("quick_scan_summary", {}), # Follow-up review fields 
reviewed_commit_sha=data.get("reviewed_commit_sha"), + reviewed_file_blobs=data.get("reviewed_file_blobs", {}), is_followup_review=data.get("is_followup_review", False), previous_review_id=data.get("previous_review_id"), resolved_findings=data.get("resolved_findings", []), @@ -562,6 +564,16 @@ class FollowupReviewContext: # These are different from comments - they're full review submissions with body text pr_reviews_since_review: list[dict] = field(default_factory=list) + # Merge conflict status + has_merge_conflicts: bool = False # True if PR has conflicts with base branch + merge_state_status: str = ( + "" # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE + ) + + # CI status - passed to AI orchestrator so it can factor into verdict + # Dict with: passing, failing, pending, failed_checks, awaiting_approval + ci_status: dict = field(default_factory=dict) + # Error flag - if set, context gathering failed and data may be incomplete error: str | None = None diff --git a/apps/backend/runners/github/orchestrator.py b/apps/backend/runners/github/orchestrator.py index 0cfb078efe..b98fb22343 100644 --- a/apps/backend/runners/github/orchestrator.py +++ b/apps/backend/runners/github/orchestrator.py @@ -389,17 +389,37 @@ async def review_pr( pr_number=pr_number, ) - # Check CI status - ci_status = await self.gh_client.get_pr_checks(pr_number) + # Check CI status (comprehensive - includes workflows awaiting approval) + ci_status = await self.gh_client.get_pr_checks_comprehensive(pr_number) + + # Log CI status with awaiting approval info + awaiting = ci_status.get("awaiting_approval", 0) + pending_without_awaiting = ci_status.get("pending", 0) - awaiting + ci_log_parts = [ + f"{ci_status.get('passing', 0)} passing", + f"{ci_status.get('failing', 0)} failing", + ] + if pending_without_awaiting > 0: + ci_log_parts.append(f"{pending_without_awaiting} pending") + if awaiting > 0: + ci_log_parts.append(f"{awaiting} awaiting approval") print( - f"[DEBUG orchestrator] CI status: {ci_status.get('passing', 0)} passing, " - f"{ci_status.get('failing', 0)} failing, {ci_status.get('pending', 0)} pending", + f"[orchestrator] CI status: {', '.join(ci_log_parts)}", flush=True, ) + if awaiting > 0: + print( + f"[orchestrator] โš ๏ธ {awaiting} workflow(s) from fork need maintainer approval to run", + flush=True, + ) - # Generate verdict (now includes CI status) + # Generate verdict (includes CI status and merge conflict check) verdict, verdict_reasoning, blockers = self._generate_verdict( - findings, structural_issues, ai_triages, ci_status + findings, + structural_issues, + ai_triages, + ci_status, + has_merge_conflicts=pr_context.has_merge_conflicts, ) print( f"[DEBUG orchestrator] Verdict: {verdict.value} - {verdict_reasoning}", @@ -430,11 +450,31 @@ async def review_pr( structural_issues=structural_issues, ai_triages=ai_triages, risk_assessment=risk_assessment, + ci_status=ci_status, ) # Get HEAD SHA for follow-up review tracking head_sha = self.bot_detector.get_last_commit_sha(pr_context.commits) + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + pr_files = await self.gh_client.get_pr_files(pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + print( + f"[Review] Captured {len(file_blobs)} file blob SHAs for follow-up tracking", + flush=True, + ) + except Exception as 
e: + print( + f"[Review] Warning: Could not capture file blobs: {e}", flush=True + ) + # Create result result = PRReviewResult( pr_number=pr_number, @@ -452,6 +492,8 @@ async def review_pr( quick_scan_summary=quick_scan, # Track the commit SHA for follow-up reviews reviewed_commit_sha=head_sha, + # Track file blobs for rebase-resistant follow-up reviews + reviewed_file_blobs=file_blobs, ) # Post review if configured @@ -479,6 +521,9 @@ async def review_pr( # Save result await result.save(self.github_dir) + # Note: PR review memory is now saved by the Electron app after the review completes + # This ensures memory is saved to the embedded LadybugDB managed by the app + # Mark as reviewed (head_sha already fetched above) if head_sha: self.bot_detector.mark_reviewed(pr_number, head_sha) @@ -594,19 +639,29 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: await result.save(self.github_dir) return result - # Check if there are new commits - if not followup_context.commits_since_review: + # Check if there are changes to review (commits OR files via blob comparison) + # After a rebase/force-push, commits_since_review will be empty (commit + # SHAs are rewritten), but files_changed_since_review will contain files + # that actually changed content based on blob SHA comparison. + has_commits = bool(followup_context.commits_since_review) + has_file_changes = bool(followup_context.files_changed_since_review) + + if not has_commits and not has_file_changes: + base_sha = previous_review.reviewed_commit_sha[:8] print( - f"[Followup] No new commits since last review at {previous_review.reviewed_commit_sha[:8]}", + f"[Followup] No changes since last review at {base_sha}", flush=True, ) # Return a result indicating no changes + no_change_summary = ( + "No new commits since last review. Previous findings still apply." + ) result = PRReviewResult( pr_number=pr_number, repo=self.config.repo, success=True, findings=previous_review.findings, - summary="No new commits since last review. 
Previous findings still apply.", + summary=no_change_summary, overall_status=previous_review.overall_status, verdict=previous_review.verdict, verdict_reasoning="No changes since last review.", @@ -618,13 +673,26 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: await result.save(self.github_dir) return result + # Build progress message based on what changed + if has_commits: + num_commits = len(followup_context.commits_since_review) + change_desc = f"{num_commits} new commits" + else: + # Rebase detected - files changed but no trackable commits + num_files = len(followup_context.files_changed_since_review) + change_desc = f"{num_files} files (rebase detected)" + self._report_progress( "analyzing", 30, - f"Analyzing {len(followup_context.commits_since_review)} new commits...", + f"Analyzing {change_desc}...", pr_number=pr_number, ) + # Fetch CI status BEFORE calling reviewer so AI can factor it into verdict + ci_status = await self.gh_client.get_pr_checks_comprehensive(pr_number) + followup_context.ci_status = ci_status + # Use parallel orchestrator for follow-up if enabled if self.config.use_parallel_orchestrator: print( @@ -669,9 +737,9 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: ) result = await reviewer.review_followup(followup_context) - # Check CI status and override verdict if failing - ci_status = await self.gh_client.get_pr_checks(pr_number) - failed_checks = ci_status.get("failed_checks", []) + # Fallback: ensure CI failures block merge even if AI didn't factor it in + # (CI status was already passed to AI via followup_context.ci_status) + failed_checks = followup_context.ci_status.get("failed_checks", []) if failed_checks: print( f"[Followup] CI checks failing: {failed_checks}", @@ -703,6 +771,9 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult: # Save result await result.save(self.github_dir) + # Note: PR review memory is now saved by the Electron app after the review completes + # This ensures memory is saved to the embedded LadybugDB managed by the app + # Mark as reviewed with new commit SHA if result.reviewed_commit_sha: self.bot_detector.mark_reviewed(pr_number, result.reviewed_commit_sha) @@ -730,16 +801,26 @@ def _generate_verdict( structural_issues: list[StructuralIssue], ai_triages: list[AICommentTriage], ci_status: dict | None = None, + has_merge_conflicts: bool = False, ) -> tuple[MergeVerdict, str, list[str]]: """ - Generate merge verdict based on all findings and CI status. + Generate merge verdict based on all findings, CI status, and merge conflicts. - NEW: Strengthened to block on verification failures, redundancy issues, - and failing CI checks. 
+ Blocks on: + - Merge conflicts (must be resolved before merging) + - Verification failures + - Redundancy issues + - Failing CI checks """ blockers = [] ci_status = ci_status or {} + # CRITICAL: Merge conflicts block merging - check first + if has_merge_conflicts: + blockers.append( + "Merge Conflicts: PR has conflicts with base branch that must be resolved" + ) + # Count by severity critical = [f for f in findings if f.severity == ReviewSeverity.CRITICAL] high = [f for f in findings if f.severity == ReviewSeverity.HIGH] @@ -780,6 +861,13 @@ def _generate_verdict( for check_name in failed_checks: blockers.append(f"CI Failed: {check_name}") + # Workflows awaiting approval block merging (fork PRs) + awaiting_approval = ci_status.get("awaiting_approval", 0) + if awaiting_approval > 0: + blockers.append( + f"Workflows Pending: {awaiting_approval} workflow(s) awaiting maintainer approval" + ) + # NEW: Verification failures block merging for f in verification_failures: note = f" - {f.verification_note}" if f.verification_note else "" @@ -812,15 +900,29 @@ def _generate_verdict( ) blockers.append(f"{t.tool_name}: {summary}") - # Determine verdict with CI, verification and redundancy checks + # Determine verdict with merge conflicts, CI, verification and redundancy checks if blockers: + # Merge conflicts are the highest priority blocker + if has_merge_conflicts: + verdict = MergeVerdict.BLOCKED + reasoning = ( + "Blocked: PR has merge conflicts with base branch. " + "Resolve conflicts before merge." + ) # CI failures are always blockers - if failed_checks: + elif failed_checks: verdict = MergeVerdict.BLOCKED reasoning = ( f"Blocked: {len(failed_checks)} CI check(s) failing. " "Fix CI before merge." ) + # Workflows awaiting approval block merging + elif awaiting_approval > 0: + verdict = MergeVerdict.BLOCKED + reasoning = ( + f"Blocked: {awaiting_approval} workflow(s) awaiting approval. " + "Approve workflows on GitHub to run CI checks." 
+ ) # NEW: Prioritize verification failures elif verification_failures: verdict = MergeVerdict.BLOCKED @@ -925,6 +1027,7 @@ def _generate_enhanced_summary( structural_issues: list[StructuralIssue], ai_triages: list[AICommentTriage], risk_assessment: dict, + ci_status: dict | None = None, ) -> str: """Generate enhanced summary with verdict, risk, and actionable next steps.""" verdict_emoji = { @@ -934,8 +1037,19 @@ def _generate_enhanced_summary( MergeVerdict.BLOCKED: "๐Ÿ”ด", } + # Generate bottom line for quick scanning + bottom_line = self._generate_bottom_line( + verdict=verdict, + ci_status=ci_status, + blockers=blockers, + findings=findings, + ) + lines = [ f"### Merge Verdict: {verdict_emoji.get(verdict, 'โšช')} {verdict.value.upper().replace('_', ' ')}", + "", + f"> {bottom_line}", + "", verdict_reasoning, "", "### Risk Assessment", @@ -1002,6 +1116,70 @@ def _generate_enhanced_summary( return "\n".join(lines) + def _generate_bottom_line( + self, + verdict: MergeVerdict, + ci_status: dict | None, + blockers: list[str], + findings: list[PRReviewFinding], + ) -> str: + """Generate a one-line summary for quick scanning at the top of the review.""" + # Check CI status + ci = ci_status or {} + pending_ci = ci.get("pending", 0) + failing_ci = ci.get("failing", 0) + awaiting_approval = ci.get("awaiting_approval", 0) + + # Count blocking findings and issues + blocking_findings = [ + f for f in findings if f.severity.value in ("critical", "high", "medium") + ] + code_blockers = [ + b for b in blockers if "CI" not in b and "Merge Conflict" not in b + ] + has_merge_conflicts = any("Merge Conflict" in b for b in blockers) + + # Determine the bottom line based on verdict and context + if verdict == MergeVerdict.READY_TO_MERGE: + return ( + "**โœ… Ready to merge** - All checks passing, no blocking issues found." + ) + + elif verdict == MergeVerdict.BLOCKED: + if has_merge_conflicts: + return "**๐Ÿ”ด Blocked** - Merge conflicts must be resolved before merge." + elif failing_ci > 0: + return f"**๐Ÿ”ด Blocked** - {failing_ci} CI check(s) failing. Fix CI before merge." + elif awaiting_approval > 0: + return "**๐Ÿ”ด Blocked** - Awaiting maintainer approval for fork PR workflow." + elif blocking_findings: + return f"**๐Ÿ”ด Blocked** - {len(blocking_findings)} critical/high/medium issue(s) must be fixed." + else: + return "**๐Ÿ”ด Blocked** - Critical issues must be resolved before merge." + + elif verdict == MergeVerdict.NEEDS_REVISION: + # Key insight: distinguish "waiting on CI" from "needs code fixes" + # Check code issues FIRST before checking pending CI + if blocking_findings: + return f"**๐ŸŸ  Needs revision** - {len(blocking_findings)} issue(s) require attention." + elif code_blockers: + return f"**๐ŸŸ  Needs revision** - {len(code_blockers)} structural/other issue(s) require attention." + elif pending_ci > 0: + # Only show "Ready once CI passes" when no code issues exist + return f"**โณ Ready once CI passes** - {pending_ci} check(s) pending, no blocking code issues." + else: + return "**๐ŸŸ  Needs revision** - See details below." + + elif verdict == MergeVerdict.MERGE_WITH_CHANGES: + if pending_ci > 0: + return ( + "**๐ŸŸก Can merge once CI passes** - Minor suggestions, no blockers." + ) + else: + return "**๐ŸŸก Can merge** - Minor suggestions noted, no blockers." + + return "**๐Ÿ“ Review complete** - See details below." 
+ def _format_review_body(self, result: PRReviewResult) -> str: """Format the review body for posting to GitHub.""" return result.summary diff --git a/apps/backend/runners/github/runner.py b/apps/backend/runners/github/runner.py index 669030e46f..b3934cdc93 100644 --- a/apps/backend/runners/github/runner.py +++ b/apps/backend/runners/github/runner.py @@ -56,8 +56,10 @@ # Add backend to path sys.path.insert(0, str(Path(__file__).parent.parent.parent)) -# Load .env file -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent.parent / ".env" if env_file.exists(): diff --git a/apps/backend/runners/github/services/followup_reviewer.py b/apps/backend/runners/github/services/followup_reviewer.py index 8b8a24181d..5c1c8bbca0 100644 --- a/apps/backend/runners/github/services/followup_reviewer.py +++ b/apps/backend/runners/github/services/followup_reviewer.py @@ -26,6 +26,7 @@ from ..models import FollowupReviewContext, GitHubRunnerConfig try: + from ..gh_client import GHClient from ..models import ( MergeVerdict, PRReviewFinding, @@ -37,6 +38,7 @@ from .prompt_manager import PromptManager from .pydantic_models import FollowupReviewResponse except (ImportError, ValueError, SystemError): + from gh_client import GHClient from models import ( MergeVerdict, PRReviewFinding, @@ -230,6 +232,27 @@ async def review_followup( "complete", 100, "Follow-up review complete!", context.pr_number ) + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + gh_client = GHClient( + project_dir=self.project_dir, + default_timeout=30.0, + repo=self.config.repo, + ) + pr_files = await gh_client.get_pr_files(context.pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + logger.info( + f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking" + ) + except Exception as e: + logger.warning(f"Could not capture file blobs: {e}") + return PRReviewResult( pr_number=context.pr_number, repo=self.config.repo, @@ -243,6 +266,7 @@ async def review_followup( reviewed_at=datetime.now().isoformat(), # Follow-up specific fields reviewed_commit_sha=context.current_commit_sha, + reviewed_file_blobs=file_blobs, is_followup_review=True, previous_review_id=context.previous_review.review_id, resolved_findings=[f.id for f in resolved], diff --git a/apps/backend/runners/github/services/parallel_followup_reviewer.py b/apps/backend/runners/github/services/parallel_followup_reviewer.py index fb7a04365b..65409b92e4 100644 --- a/apps/backend/runners/github/services/parallel_followup_reviewer.py +++ b/apps/backend/runners/github/services/parallel_followup_reviewer.py @@ -32,6 +32,8 @@ try: from ...core.client import create_client from ...phase_config import get_thinking_budget + from ..context_gatherer import _validate_git_ref + from ..gh_client import GHClient from ..models import ( GitHubRunnerConfig, MergeVerdict, @@ -40,10 +42,13 @@ ReviewSeverity, ) from .category_utils import map_category + from .pr_worktree_manager import PRWorktreeManager from .pydantic_models import ParallelFollowupResponse from .sdk_utils import process_sdk_stream except (ImportError, ValueError, SystemError): + from context_gatherer import _validate_git_ref from core.client import create_client + from 
gh_client import GHClient from models import ( GitHubRunnerConfig, MergeVerdict, @@ -53,6 +58,7 @@ ) from phase_config import get_thinking_budget from services.category_utils import map_category + from services.pr_worktree_manager import PRWorktreeManager from services.pydantic_models import ParallelFollowupResponse from services.sdk_utils import process_sdk_stream @@ -62,6 +68,9 @@ # Check if debug mode is enabled DEBUG_MODE = os.environ.get("DEBUG", "").lower() in ("true", "1", "yes") +# Directory for PR review worktrees (shared with initial reviewer) +PR_WORKTREE_DIR = ".auto-claude/github/pr/worktrees" + # Severity mapping for AI responses _SEVERITY_MAPPING = { "critical": ReviewSeverity.CRITICAL, @@ -106,6 +115,7 @@ def __init__( self.github_dir = Path(github_dir) self.config = config self.progress_callback = progress_callback + self.worktree_manager = PRWorktreeManager(project_dir, PR_WORKTREE_DIR) def _report_progress(self, phase: str, progress: int, message: str, **kwargs): """Report progress if callback is set.""" @@ -136,6 +146,37 @@ def _load_prompt(self, filename: str) -> str: logger.warning(f"Prompt file not found: {prompt_file}") return "" + def _create_pr_worktree(self, head_sha: str, pr_number: int) -> Path: + """Create a temporary worktree at the PR head commit. + + Args: + head_sha: The commit SHA of the PR head (validated before use) + pr_number: The PR number for naming + + Returns: + Path to the created worktree + + Raises: + RuntimeError: If worktree creation fails + ValueError: If head_sha fails validation (command injection prevention) + """ + # SECURITY: Validate git ref before use in subprocess calls + if not _validate_git_ref(head_sha): + raise ValueError( + f"Invalid git ref: '{head_sha}'. " + "Must contain only alphanumeric characters, dots, slashes, underscores, and hyphens." + ) + + return self.worktree_manager.create_worktree(head_sha, pr_number) + + def _cleanup_pr_worktree(self, worktree_path: Path) -> None: + """Remove a temporary PR review worktree with fallback chain. + + Args: + worktree_path: Path to the worktree to remove + """ + self.worktree_manager.remove_worktree(worktree_path) + def _define_specialist_agents(self) -> dict[str, AgentDefinition]: """ Define specialist agents for follow-up review. @@ -265,6 +306,44 @@ def _format_ai_reviews(self, context: FollowupReviewContext) -> str: return "\n\n---\n\n".join(ai_content) + def _format_ci_status(self, context: FollowupReviewContext) -> str: + """Format CI status for the prompt.""" + ci_status = context.ci_status + if not ci_status: + return "CI status not available." 
+ + passing = ci_status.get("passing", 0) + failing = ci_status.get("failing", 0) + pending = ci_status.get("pending", 0) + failed_checks = ci_status.get("failed_checks", []) + awaiting_approval = ci_status.get("awaiting_approval", 0) + + lines = [] + + # Overall status + if failing > 0: + lines.append(f"โš ๏ธ **{failing} CI check(s) FAILING** - PR cannot be merged") + elif pending > 0: + lines.append(f"โณ **{pending} CI check(s) pending** - Wait for completion") + elif passing > 0: + lines.append(f"โœ… **All {passing} CI check(s) passing**") + else: + lines.append("No CI checks configured") + + # List failed checks + if failed_checks: + lines.append("\n**Failed checks:**") + for check in failed_checks: + lines.append(f" - โŒ {check}") + + # Awaiting approval (fork PRs) + if awaiting_approval > 0: + lines.append( + f"\nโธ๏ธ **{awaiting_approval} workflow(s) awaiting maintainer approval** (fork PR)" + ) + + return "\n".join(lines) + def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str: """Build full prompt for orchestrator with follow-up context.""" # Load orchestrator prompt @@ -277,6 +356,7 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str: commits = self._format_commits(context) contributor_comments = self._format_comments(context) ai_reviews = self._format_ai_reviews(context) + ci_status = self._format_ci_status(context) # Truncate diff if too long MAX_DIFF_CHARS = 100_000 @@ -295,6 +375,9 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str: **New Commits:** {len(context.commits_since_review)} **Files Changed:** {len(context.files_changed_since_review)} +### CI Status (CRITICAL - Must Factor Into Verdict) +{ci_status} + ### Previous Review Summary {context.previous_review.summary[:500] if context.previous_review.summary else "No summary available."} @@ -323,6 +406,7 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str: Now analyze this follow-up and delegate to the appropriate specialist agents. Remember: YOU decide which agents to invoke based on YOUR analysis. The SDK will run invoked agents in parallel automatically. +**CRITICAL: Your verdict MUST account for CI status. 
Failing CI = BLOCKED verdict.** """ return base_prompt + followup_context @@ -341,6 +425,9 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: f"[ParallelFollowup] Starting follow-up review for PR #{context.pr_number}" ) + # Track worktree for cleanup + worktree_path: Path | None = None + try: self._report_progress( "orchestrating", @@ -352,13 +439,48 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: # Build orchestrator prompt prompt = self._build_orchestrator_prompt(context) - # Get project root + # Get project root - default to local checkout project_root = ( self.project_dir.parent.parent if self.project_dir.name == "backend" else self.project_dir ) + # Create temporary worktree at PR head commit for isolated review + # This ensures agents read from the correct PR state, not the current checkout + head_sha = context.current_commit_sha + if head_sha and _validate_git_ref(head_sha): + try: + if DEBUG_MODE: + print( + f"[Followup] DEBUG: Creating worktree for head_sha={head_sha}", + flush=True, + ) + worktree_path = self._create_pr_worktree( + head_sha, context.pr_number + ) + project_root = worktree_path + print( + f"[Followup] Using worktree at {worktree_path.name} for PR review", + flush=True, + ) + except Exception as e: + if DEBUG_MODE: + print( + f"[Followup] DEBUG: Worktree creation FAILED: {e}", + flush=True, + ) + logger.warning( + f"[ParallelFollowup] Worktree creation failed, " + f"falling back to local checkout: {e}" + ) + # Fallback to original behavior if worktree creation fails + else: + logger.warning( + f"[ParallelFollowup] Invalid or missing head_sha '{head_sha}', " + "using local checkout" + ) + # Use model and thinking level from config (user settings) model = self.config.model or "claude-sonnet-4-5-20250929" thinking_level = self.config.thinking_level or "medium" @@ -459,15 +581,45 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: f"{len(resolved_ids)} resolved, {len(unresolved_ids)} unresolved" ) + # Generate blockers from critical/high/medium severity findings + # (Medium also blocks merge in our strict quality gates approach) + blockers = [] + + # CRITICAL: Merge conflicts block merging - check FIRST before summary generation + # This must happen before _generate_summary so the summary reflects merge conflict status + if context.has_merge_conflicts: + blockers.append( + "Merge Conflicts: PR has conflicts with base branch that must be resolved" + ) + # Override verdict to BLOCKED if merge conflicts exist + verdict = MergeVerdict.BLOCKED + verdict_reasoning = ( + "Blocked: PR has merge conflicts with base branch. " + "Resolve conflicts before merge." 
+ ) + print( + "[ParallelFollowup] โš ๏ธ PR has merge conflicts - blocking merge", + flush=True, + ) + + for finding in unique_findings: + if finding.severity in ( + ReviewSeverity.CRITICAL, + ReviewSeverity.HIGH, + ReviewSeverity.MEDIUM, + ): + blockers.append(f"{finding.category.value}: {finding.title}") + # Extract validation counts dismissed_count = len(result_data.get("dismissed_false_positive_ids", [])) confirmed_count = result_data.get("confirmed_valid_count", 0) needs_human_count = result_data.get("needs_human_review_count", 0) - # Generate summary + # Generate summary (AFTER merge conflict check so it reflects correct verdict) summary = self._generate_summary( verdict=verdict, verdict_reasoning=verdict_reasoning, + blockers=blockers, resolved_count=len(resolved_ids), unresolved_count=len(unresolved_ids), new_count=len(new_finding_ids), @@ -475,6 +627,7 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: dismissed_false_positive_count=dismissed_count, confirmed_valid_count=confirmed_count, needs_human_review_count=needs_human_count, + ci_status=context.ci_status, ) # Map verdict to overall_status @@ -487,16 +640,26 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: else: overall_status = "approve" - # Generate blockers from critical/high/medium severity findings - # (Medium also blocks merge in our strict quality gates approach) - blockers = [] - for finding in unique_findings: - if finding.severity in ( - ReviewSeverity.CRITICAL, - ReviewSeverity.HIGH, - ReviewSeverity.MEDIUM, - ): - blockers.append(f"{finding.category.value}: {finding.title}") + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + gh_client = GHClient( + project_dir=self.project_dir, + default_timeout=30.0, + repo=self.config.repo, + ) + pr_files = await gh_client.get_pr_files(context.pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + logger.info( + f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking" + ) + except Exception as e: + logger.warning(f"Could not capture file blobs: {e}") result = PRReviewResult( pr_number=context.pr_number, @@ -509,6 +672,7 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: verdict_reasoning=verdict_reasoning, blockers=blockers, reviewed_commit_sha=context.current_commit_sha, + reviewed_file_blobs=file_blobs, is_followup_review=True, previous_review_id=context.previous_review.review_id or context.previous_review.pr_number, @@ -543,6 +707,10 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult: is_followup_review=True, reviewed_commit_sha=context.current_commit_sha, ) + finally: + # Always cleanup worktree, even on error + if worktree_path: + self._cleanup_pr_worktree(worktree_path) def _parse_structured_output( self, data: dict, context: FollowupReviewContext @@ -614,13 +782,11 @@ def _parse_structured_output( validation = validation_map.get(rv.finding_id) validation_status = None validation_evidence = None - validation_confidence = None validation_explanation = None if validation: validation_status = validation.validation_status validation_evidence = validation.code_evidence - validation_confidence = validation.confidence validation_explanation = validation.explanation findings.append( @@ -636,7 +802,6 @@ def _parse_structured_output( 
fixable=original.fixable, validation_status=validation_status, validation_evidence=validation_evidence, - validation_confidence=validation_confidence, validation_explanation=validation_explanation, ) ) @@ -805,6 +970,7 @@ def _generate_summary( self, verdict: MergeVerdict, verdict_reasoning: str, + blockers: list[str], resolved_count: int, unresolved_count: int, new_count: int, @@ -812,13 +978,15 @@ dismissed_false_positive_count: int = 0, confirmed_valid_count: int = 0, needs_human_review_count: int = 0, + ci_status: dict | None = None, ) -> str: """Generate a human-readable summary of the follow-up review.""" + # Use same emojis as orchestrator.py for consistency status_emoji = { MergeVerdict.READY_TO_MERGE: "✅", - MergeVerdict.MERGE_WITH_CHANGES: "⚠️", - MergeVerdict.NEEDS_REVISION: "🔄", - MergeVerdict.BLOCKED: "🚫", + MergeVerdict.MERGE_WITH_CHANGES: "🟡", + MergeVerdict.NEEDS_REVISION: "🟠", + MergeVerdict.BLOCKED: "🔴", } emoji = status_emoji.get(verdict, "📝") @@ -826,6 +994,15 @@ ", ".join(agents_invoked) if agents_invoked else "orchestrator only" ) + # Generate a prominent bottom-line summary for quick scanning + bottom_line = self._generate_bottom_line( + verdict=verdict, + ci_status=ci_status, + unresolved_count=unresolved_count, + new_count=new_count, + blockers=blockers, + ) + # Build validation section if there are validation results validation_section = "" if ( @@ -838,15 +1015,26 @@ - 🔍 **Dismissed as False Positives**: {dismissed_false_positive_count} findings were re-investigated and found to be incorrect - ✓ **Confirmed Valid**: {confirmed_valid_count} findings verified as genuine issues - 👤 **Needs Human Review**: {needs_human_review_count} findings require manual verification +""" + + # Build blockers section if there are any blockers + blockers_section = "" + if blockers: + blockers_list = "\n".join(f"- {b}" for b in blockers) + blockers_section = f""" +### 🚨 Blocking Issues +{blockers_list} """ summary = f"""## {emoji} Follow-up Review: {verdict.value.replace("_", " ").title()} + > {bottom_line} + ### Resolution Status - ✅ **Resolved**: {resolved_count} previous findings addressed - ❌ **Unresolved**: {unresolved_count} previous findings remain - 🆕 **New Issues**: {new_count} new findings in recent changes -{validation_section} +{validation_section}{blockers_section} ### Verdict {verdict_reasoning} @@ -857,3 +1045,65 @@ *This is an AI-generated follow-up review using parallel specialist analysis with finding validation.* """ return summary + + def _generate_bottom_line( + self, + verdict: MergeVerdict, + ci_status: dict | None, + unresolved_count: int, + new_count: int, + blockers: list[str], + ) -> str: + """Generate a one-line summary for quick scanning at the top of the review.""" + # Check CI status + ci = ci_status or {} + pending_ci = ci.get("pending", 0) + failing_ci = ci.get("failing", 0) + awaiting_approval = ci.get("awaiting_approval", 0) + + # Count blocking issues (excluding CI-related ones) + code_blockers = [ + b for b in blockers if "CI" not in b and "Merge Conflict" not in b + ] + has_merge_conflicts = any("Merge Conflict" in b for b in blockers) + + # Determine the bottom line based on verdict and context + if verdict == MergeVerdict.READY_TO_MERGE: + return "**✅ Ready to merge** - All checks passing and findings addressed."
+ + elif verdict == MergeVerdict.BLOCKED: + if has_merge_conflicts: + return "**🔴 Blocked** - Merge conflicts must be resolved before merge." + elif failing_ci > 0: + return f"**🔴 Blocked** - {failing_ci} CI check(s) failing. Fix CI before merge." + elif awaiting_approval > 0: + return "**🔴 Blocked** - Awaiting maintainer approval for fork PR workflow." + elif code_blockers: + return f"**🔴 Blocked** - {len(code_blockers)} blocking issue(s) require fixes." + else: + return "**🔴 Blocked** - Critical issues must be resolved before merge." + + elif verdict == MergeVerdict.NEEDS_REVISION: + # Key insight: distinguish "waiting on CI" from "needs code fixes" + # Check code issues FIRST before checking pending CI + if unresolved_count > 0: + return f"**🟠 Needs revision** - {unresolved_count} unresolved finding(s) from previous review." + elif code_blockers: + return f"**🟠 Needs revision** - {len(code_blockers)} blocking issue(s) require fixes." + elif new_count > 0: + return f"**🟠 Needs revision** - {new_count} new issue(s) found in recent changes." + elif pending_ci > 0: + # Only show "Ready once CI passes" when no code issues exist + return f"**⏳ Ready once CI passes** - {pending_ci} check(s) pending, all findings addressed." + else: + return "**🟠 Needs revision** - See details below." + + elif verdict == MergeVerdict.MERGE_WITH_CHANGES: + if pending_ci > 0: + return ( + "**🟡 Can merge once CI passes** - Minor suggestions, no blockers." + ) + else: + return "**🟡 Can merge** - Minor suggestions noted, no blockers." + + return "**📝 Review complete** - See details below." diff --git a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py index 7b7fe00c54..0a2f88ca5b 100644 --- a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py +++ b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py @@ -20,9 +20,6 @@ import hashlib import logging import os -import shutil -import subprocess -import uuid from pathlib import Path from typing import Any @@ -32,6 +29,7 @@ from ...core.client import create_client from ...phase_config import get_thinking_budget from ..context_gatherer import PRContext, _validate_git_ref + from ..gh_client import GHClient from ..models import ( GitHubRunnerConfig, MergeVerdict, @@ -40,11 +38,13 @@ ReviewSeverity, ) from .category_utils import map_category + from .pr_worktree_manager import PRWorktreeManager from .pydantic_models import ParallelOrchestratorResponse from .sdk_utils import process_sdk_stream except (ImportError, ValueError, SystemError): from context_gatherer import PRContext, _validate_git_ref from core.client import create_client + from gh_client import GHClient from models import ( GitHubRunnerConfig, MergeVerdict, @@ -54,6 +54,7 @@ ) from phase_config import get_thinking_budget from services.category_utils import map_category + from services.pr_worktree_manager import PRWorktreeManager from services.pydantic_models import ParallelOrchestratorResponse from services.sdk_utils import process_sdk_stream @@ -92,6 +93,7 @@ def __init__( self.github_dir = Path(github_dir) self.config = config self.progress_callback = progress_callback + self.worktree_manager = PRWorktreeManager(project_dir, PR_WORKTREE_DIR) def _report_progress(self, phase: str, progress: int, message: str, **kwargs): """Report progress if callback is set.""" @@ -143,78 +145,7 @@ def _create_pr_worktree(self, head_sha: str, pr_number: int) ->
Path: "Must contain only alphanumeric characters, dots, slashes, underscores, and hyphens." ) - worktree_name = f"pr-{pr_number}-{uuid.uuid4().hex[:8]}" - worktree_dir = self.project_dir / PR_WORKTREE_DIR - - if DEBUG_MODE: - print(f"[PRReview] DEBUG: project_dir={self.project_dir}", flush=True) - print(f"[PRReview] DEBUG: worktree_dir={worktree_dir}", flush=True) - print(f"[PRReview] DEBUG: head_sha={head_sha}", flush=True) - - worktree_dir.mkdir(parents=True, exist_ok=True) - worktree_path = worktree_dir / worktree_name - - if DEBUG_MODE: - print(f"[PRReview] DEBUG: worktree_path={worktree_path}", flush=True) - print( - f"[PRReview] DEBUG: worktree_dir exists={worktree_dir.exists()}", - flush=True, - ) - - # Fetch the commit if not available locally (handles fork PRs) - fetch_result = subprocess.run( - ["git", "fetch", "origin", head_sha], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=60, - ) - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: fetch returncode={fetch_result.returncode}", - flush=True, - ) - if fetch_result.stderr: - print( - f"[PRReview] DEBUG: fetch stderr={fetch_result.stderr[:200]}", - flush=True, - ) - - # Create detached worktree at the PR commit - result = subprocess.run( - ["git", "worktree", "add", "--detach", str(worktree_path), head_sha], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=120, # Worktree add can be slow for large repos - ) - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: worktree add returncode={result.returncode}", - flush=True, - ) - if result.stderr: - print( - f"[PRReview] DEBUG: worktree add stderr={result.stderr[:200]}", - flush=True, - ) - if result.stdout: - print( - f"[PRReview] DEBUG: worktree add stdout={result.stdout[:200]}", - flush=True, - ) - - if result.returncode != 0: - raise RuntimeError(f"Failed to create worktree: {result.stderr}") - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: worktree created, exists={worktree_path.exists()}", - flush=True, - ) - logger.info(f"[PRReview] Created worktree at {worktree_path}") - return worktree_path + return self.worktree_manager.create_worktree(head_sha, pr_number) def _cleanup_pr_worktree(self, worktree_path: Path) -> None: """Remove a temporary PR review worktree with fallback chain. 
@@ -222,100 +153,16 @@ def _cleanup_pr_worktree(self, worktree_path: Path) -> None: Args: worktree_path: Path to the worktree to remove """ - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: _cleanup_pr_worktree called with {worktree_path}", - flush=True, - ) - - if not worktree_path or not worktree_path.exists(): - if DEBUG_MODE: - print( - "[PRReview] DEBUG: worktree path doesn't exist, skipping cleanup", - flush=True, - ) - return - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: Attempting to remove worktree at {worktree_path}", - flush=True, - ) - - # Try 1: git worktree remove - result = subprocess.run( - ["git", "worktree", "remove", "--force", str(worktree_path)], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=30, - ) - - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: worktree remove returncode={result.returncode}", - flush=True, - ) - - if result.returncode == 0: - logger.info(f"[PRReview] Cleaned up worktree: {worktree_path.name}") - return - - # Try 2: shutil.rmtree fallback - try: - shutil.rmtree(worktree_path, ignore_errors=True) - subprocess.run( - ["git", "worktree", "prune"], - cwd=self.project_dir, - capture_output=True, - timeout=30, - ) - logger.warning(f"[PRReview] Used shutil fallback for: {worktree_path.name}") - except Exception as e: - logger.error(f"[PRReview] Failed to cleanup worktree {worktree_path}: {e}") + self.worktree_manager.remove_worktree(worktree_path) def _cleanup_stale_pr_worktrees(self) -> None: - """Clean up orphaned PR review worktrees on startup.""" - worktree_dir = self.project_dir / PR_WORKTREE_DIR - if not worktree_dir.exists(): - return - - # Get registered worktrees from git - result = subprocess.run( - ["git", "worktree", "list", "--porcelain"], - cwd=self.project_dir, - capture_output=True, - text=True, - timeout=30, - ) - registered = set() - for line in result.stdout.split("\n"): - if line.startswith("worktree "): - # Safely parse - check bounds to prevent IndexError - parts = line.split(" ", 1) - if len(parts) > 1 and parts[1]: - registered.add(Path(parts[1])) - - # Remove unregistered directories - stale_count = 0 - for item in worktree_dir.iterdir(): - if item.is_dir() and item not in registered: - logger.info(f"[PRReview] Removing stale worktree: {item.name}") - shutil.rmtree(item, ignore_errors=True) - stale_count += 1 - - if stale_count > 0: - subprocess.run( - ["git", "worktree", "prune"], - cwd=self.project_dir, - capture_output=True, - timeout=30, + """Clean up orphaned, expired, and excess PR review worktrees on startup.""" + stats = self.worktree_manager.cleanup_worktrees() + if stats["total"] > 0: + logger.info( + f"[PRReview] Cleanup: removed {stats['total']} worktrees " + f"(orphaned={stats['orphaned']}, expired={stats['expired']}, excess={stats['excess']})" ) - if DEBUG_MODE: - print( - f"[PRReview] DEBUG: Cleaned up {stale_count} stale worktree(s)", - flush=True, - ) def _define_specialist_agents(self) -> dict[str, AgentDefinition]: """ @@ -584,7 +431,7 @@ def _create_finding_from_structured(self, finding_data: Any) -> PRReviewFinding: category=category, severity=severity, suggested_fix=finding_data.suggested_fix or "", - confidence=self._normalize_confidence(finding_data.confidence), + evidence=finding_data.evidence, ) async def review(self, context: PRContext) -> PRReviewResult: @@ -769,9 +616,9 @@ async def review(self, context: PRContext) -> PRReviewResult: f"[ParallelOrchestrator] Review complete: {len(unique_findings)} findings" ) - # Generate verdict + # Generate verdict (includes merge conflict 
check) verdict, verdict_reasoning, blockers = self._generate_verdict( - unique_findings + unique_findings, has_merge_conflicts=context.has_merge_conflicts ) # Generate summary @@ -799,6 +646,27 @@ async def review(self, context: PRContext) -> PRReviewResult: latest_commit = context.commits[-1] head_sha = latest_commit.get("oid") or latest_commit.get("sha") + # Get file blob SHAs for rebase-resistant follow-up reviews + # Blob SHAs persist across rebases - same content = same blob SHA + file_blobs: dict[str, str] = {} + try: + gh_client = GHClient( + project_dir=self.project_dir, + default_timeout=30.0, + repo=self.config.repo, + ) + pr_files = await gh_client.get_pr_files(context.pr_number) + for file in pr_files: + filename = file.get("filename", "") + blob_sha = file.get("sha", "") + if filename and blob_sha: + file_blobs[filename] = blob_sha + logger.info( + f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking" + ) + except Exception as e: + logger.warning(f"Could not capture file blobs: {e}") + result = PRReviewResult( pr_number=context.pr_number, repo=self.config.repo, @@ -810,6 +678,7 @@ async def review(self, context: PRContext) -> PRReviewResult: verdict_reasoning=verdict_reasoning, blockers=blockers, reviewed_commit_sha=head_sha, + reviewed_file_blobs=file_blobs, ) self._report_progress( @@ -945,7 +814,7 @@ def _create_finding_from_dict(self, f_data: dict[str, Any]) -> PRReviewFinding: category=category, severity=severity, suggested_fix=f_data.get("suggested_fix", ""), - confidence=self._normalize_confidence(f_data.get("confidence", 85)), + evidence=f_data.get("evidence"), ) def _parse_text_output(self, output: str) -> list[PRReviewFinding]: @@ -993,11 +862,17 @@ def _deduplicate_findings( return unique def _generate_verdict( - self, findings: list[PRReviewFinding] + self, findings: list[PRReviewFinding], has_merge_conflicts: bool = False ) -> tuple[MergeVerdict, str, list[str]]: - """Generate merge verdict based on findings.""" + """Generate merge verdict based on findings and merge conflict status.""" blockers = [] + # CRITICAL: Merge conflicts block merging - check first + if has_merge_conflicts: + blockers.append( + "Merge Conflicts: PR has conflicts with base branch that must be resolved" + ) + critical = [f for f in findings if f.severity == ReviewSeverity.CRITICAL] high = [f for f in findings if f.severity == ReviewSeverity.HIGH] medium = [f for f in findings if f.severity == ReviewSeverity.MEDIUM] @@ -1007,8 +882,19 @@ def _generate_verdict( blockers.append(f"Critical: {f.title} ({f.file}:{f.line})") if blockers: - verdict = MergeVerdict.BLOCKED - reasoning = f"Blocked by {len(blockers)} critical issue(s)" + # Merge conflicts are the highest priority blocker + if has_merge_conflicts: + verdict = MergeVerdict.BLOCKED + reasoning = ( + "Blocked: PR has merge conflicts with base branch. " + "Resolve conflicts before merge." 
+ ) + elif critical: + verdict = MergeVerdict.BLOCKED + reasoning = f"Blocked by {len(critical)} critical issue(s)" + else: + verdict = MergeVerdict.BLOCKED + reasoning = f"Blocked by {len(blockers)} issue(s)" elif high or medium: # High and Medium severity findings block merge verdict = MergeVerdict.NEEDS_REVISION diff --git a/apps/backend/runners/github/services/pr_review_engine.py b/apps/backend/runners/github/services/pr_review_engine.py index 24d1fb69f0..d8832539e7 100644 --- a/apps/backend/runners/github/services/pr_review_engine.py +++ b/apps/backend/runners/github/services/pr_review_engine.py @@ -242,7 +242,9 @@ async def run_review_pass( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text if review_pass == ReviewPass.QUICK_SCAN: @@ -502,7 +504,9 @@ async def _run_structural_pass(self, context: PRContext) -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text except Exception as e: print(f"[AI] Structural pass error: {e}", flush=True) @@ -558,7 +562,9 @@ async def _run_ai_triage_pass(self, context: PRContext) -> str: msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text except Exception as e: print(f"[AI] AI triage pass error: {e}", flush=True) diff --git a/apps/backend/runners/github/services/pr_worktree_manager.py b/apps/backend/runners/github/services/pr_worktree_manager.py new file mode 100644 index 0000000000..1fa921bfa1 --- /dev/null +++ b/apps/backend/runners/github/services/pr_worktree_manager.py @@ -0,0 +1,425 @@ +""" +PR Worktree Manager +=================== + +Manages lifecycle of PR review worktrees with cleanup policies. 
+ +Features: +- Age-based cleanup (remove worktrees older than N days) +- Count-based cleanup (keep only N most recent worktrees) +- Orphaned worktree cleanup (worktrees not registered with git) +- Automatic cleanup on review completion +""" + +from __future__ import annotations + +import logging +import os +import shutil +import subprocess +import time +from pathlib import Path +from typing import NamedTuple + +logger = logging.getLogger(__name__) + +# Default cleanup policies (can be overridden via environment variables) +DEFAULT_MAX_PR_WORKTREES = 10 # Max worktrees to keep +DEFAULT_PR_WORKTREE_MAX_AGE_DAYS = 7 # Max age in days + + +def _get_max_pr_worktrees() -> int: + """Get max worktrees setting, read at runtime for testability.""" + try: + value = int(os.environ.get("MAX_PR_WORKTREES", str(DEFAULT_MAX_PR_WORKTREES))) + return value if value > 0 else DEFAULT_MAX_PR_WORKTREES + except (ValueError, TypeError): + return DEFAULT_MAX_PR_WORKTREES + + +def _get_max_age_days() -> int: + """Get max age setting, read at runtime for testability.""" + try: + value = int( + os.environ.get( + "PR_WORKTREE_MAX_AGE_DAYS", str(DEFAULT_PR_WORKTREE_MAX_AGE_DAYS) + ) + ) + return value if value >= 0 else DEFAULT_PR_WORKTREE_MAX_AGE_DAYS + except (ValueError, TypeError): + return DEFAULT_PR_WORKTREE_MAX_AGE_DAYS + + +# Safe pattern for git refs (SHA, branch names) +# Allows: alphanumeric, dots, underscores, hyphens, forward slashes +import re + +SAFE_REF_PATTERN = re.compile(r"^[a-zA-Z0-9._/\-]+$") + + +class WorktreeInfo(NamedTuple): + """Information about a PR worktree.""" + + path: Path + age_days: float + pr_number: int | None = None + + +class PRWorktreeManager: + """ + Manages PR review worktrees with automatic cleanup policies. + + Cleanup policies: + 1. Remove worktrees older than PR_WORKTREE_MAX_AGE_DAYS (default: 7 days) + 2. Keep only MAX_PR_WORKTREES most recent worktrees (default: 10) + 3. Remove orphaned worktrees (not registered with git) + """ + + def __init__(self, project_dir: Path, worktree_dir: str | Path): + """ + Initialize the worktree manager. + + Args: + project_dir: Root directory of the git project + worktree_dir: Directory where PR worktrees are stored (relative to project_dir) + """ + self.project_dir = Path(project_dir) + self.worktree_base_dir = self.project_dir / worktree_dir + + def create_worktree( + self, head_sha: str, pr_number: int, auto_cleanup: bool = True + ) -> Path: + """ + Create a PR worktree with automatic cleanup of old worktrees. 
+ + Args: + head_sha: Git commit SHA to checkout + pr_number: PR number for naming + auto_cleanup: If True (default), run cleanup before creating + + Returns: + Path to the created worktree + + Raises: + RuntimeError: If worktree creation fails + ValueError: If head_sha or pr_number are invalid + """ + # Validate inputs to prevent command injection + if not head_sha or not SAFE_REF_PATTERN.match(head_sha): + raise ValueError( + f"Invalid head_sha: must match pattern {SAFE_REF_PATTERN.pattern}" + ) + if not isinstance(pr_number, int) or pr_number <= 0: + raise ValueError( + f"Invalid pr_number: must be a positive integer, got {pr_number}" + ) + + # Run cleanup before creating new worktree (can be disabled for tests) + if auto_cleanup: + self.cleanup_worktrees() + + # Generate worktree name with timestamp for uniqueness + sha_short = head_sha[:8] + timestamp = int(time.time() * 1000) # Millisecond precision + worktree_name = f"pr-{pr_number}-{sha_short}-{timestamp}" + + # Create worktree directory + self.worktree_base_dir.mkdir(parents=True, exist_ok=True) + worktree_path = self.worktree_base_dir / worktree_name + + logger.debug(f"Creating worktree: {worktree_path}") + + try: + # Fetch the commit if not available locally (handles fork PRs) + fetch_result = subprocess.run( + ["git", "fetch", "origin", head_sha], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=60, + ) + + if fetch_result.returncode != 0: + logger.warning( + f"Could not fetch {head_sha} from origin (fork PR?): {fetch_result.stderr}" + ) + except subprocess.TimeoutExpired: + logger.warning( + f"Timeout fetching {head_sha} from origin, continuing anyway" + ) + + try: + # Create detached worktree at the PR commit + result = subprocess.run( + ["git", "worktree", "add", "--detach", str(worktree_path), head_sha], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=120, + ) + + if result.returncode != 0: + raise RuntimeError(f"Failed to create worktree: {result.stderr}") + except subprocess.TimeoutExpired: + # Clean up partial worktree on timeout + if worktree_path.exists(): + shutil.rmtree(worktree_path, ignore_errors=True) + raise RuntimeError(f"Timeout creating worktree for {head_sha}") + + logger.info(f"[WorktreeManager] Created worktree at {worktree_path}") + return worktree_path + + def remove_worktree(self, worktree_path: Path) -> None: + """ + Remove a PR worktree with fallback chain. 
+ + Args: + worktree_path: Path to the worktree to remove + """ + if not worktree_path or not worktree_path.exists(): + return + + logger.debug(f"Removing worktree: {worktree_path}") + + # Try 1: git worktree remove + try: + result = subprocess.run( + ["git", "worktree", "remove", "--force", str(worktree_path)], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=60, + ) + + if result.returncode == 0: + logger.info(f"[WorktreeManager] Removed worktree: {worktree_path.name}") + return + except subprocess.TimeoutExpired: + logger.warning( + f"Timeout removing worktree {worktree_path.name}, falling back to shutil" + ) + + # Try 2: shutil.rmtree fallback + try: + shutil.rmtree(worktree_path, ignore_errors=True) + subprocess.run( + ["git", "worktree", "prune"], + cwd=self.project_dir, + capture_output=True, + timeout=30, + ) + logger.warning( + f"[WorktreeManager] Used shutil fallback for: {worktree_path.name}" + ) + except Exception as e: + logger.error( + f"[WorktreeManager] Failed to remove worktree {worktree_path}: {e}" + ) + + def get_worktree_info(self) -> list[WorktreeInfo]: + """ + Get information about all PR worktrees. + + Returns: + List of WorktreeInfo objects sorted by age (oldest first) + """ + if not self.worktree_base_dir.exists(): + return [] + + worktrees = [] + current_time = time.time() + + for item in self.worktree_base_dir.iterdir(): + if not item.is_dir(): + continue + + # Get modification time + mtime = item.stat().st_mtime + age_seconds = current_time - mtime + age_days = age_seconds / 86400 # Convert seconds to days + + # Extract PR number from directory name (format: pr-XXX-sha) + pr_number = None + if item.name.startswith("pr-"): + parts = item.name.split("-") + if len(parts) >= 2: + try: + pr_number = int(parts[1]) + except ValueError: + pass + + worktrees.append( + WorktreeInfo(path=item, age_days=age_days, pr_number=pr_number) + ) + + # Sort by age (oldest first) + worktrees.sort(key=lambda x: x.age_days, reverse=True) + + return worktrees + + def get_registered_worktrees(self) -> set[Path]: + """ + Get set of worktrees registered with git. + + Returns: + Set of resolved Path objects for registered worktrees + """ + try: + result = subprocess.run( + ["git", "worktree", "list", "--porcelain"], + cwd=self.project_dir, + capture_output=True, + text=True, + timeout=30, + ) + except subprocess.TimeoutExpired: + logger.warning("Timeout listing worktrees, returning empty set") + return set() + + registered = set() + for line in result.stdout.split("\n"): + if line.startswith("worktree "): + parts = line.split(" ", 1) + if len(parts) > 1 and parts[1]: + registered.add(Path(parts[1])) + + return registered + + def cleanup_worktrees(self, force: bool = False) -> dict[str, int]: + """ + Clean up PR worktrees based on age and count policies. + + Cleanup order: + 1. Remove orphaned worktrees (not registered with git) + 2. Remove worktrees older than PR_WORKTREE_MAX_AGE_DAYS + 3. 
If still over MAX_PR_WORKTREES, remove oldest worktrees + + Args: + force: If True, skip age check and only enforce count limit + + Returns: + Dict with cleanup statistics: { + 'orphaned': count, + 'expired': count, + 'excess': count, + 'total': count + } + """ + stats = {"orphaned": 0, "expired": 0, "excess": 0, "total": 0} + + if not self.worktree_base_dir.exists(): + return stats + + # Get registered worktrees (resolved paths for consistent comparison) + registered = self.get_registered_worktrees() + registered_resolved = {p.resolve() for p in registered} + + # Get all PR worktree info + worktrees = self.get_worktree_info() + + # Phase 1: Remove orphaned worktrees + for wt in worktrees: + if wt.path.resolve() not in registered_resolved: + logger.info( + f"[WorktreeManager] Removing orphaned worktree: {wt.path.name} (age: {wt.age_days:.1f} days)" + ) + shutil.rmtree(wt.path, ignore_errors=True) + stats["orphaned"] += 1 + + # Refresh worktree list after orphan cleanup + try: + subprocess.run( + ["git", "worktree", "prune"], + cwd=self.project_dir, + capture_output=True, + timeout=30, + ) + except subprocess.TimeoutExpired: + logger.warning("Timeout pruning worktrees, continuing anyway") + + # Refresh registered worktrees after prune (git's internal registry may have changed) + registered_resolved = {p.resolve() for p in self.get_registered_worktrees()} + + # Get fresh worktree info for remaining worktrees (use resolved paths) + worktrees = [ + wt + for wt in self.get_worktree_info() + if wt.path.resolve() in registered_resolved + ] + + # Phase 2: Remove expired worktrees (older than max age) + max_age_days = _get_max_age_days() + if not force: + for wt in worktrees: + if wt.age_days > max_age_days: + logger.info( + f"[WorktreeManager] Removing expired worktree: {wt.path.name} (age: {wt.age_days:.1f} days, max: {max_age_days} days)" + ) + self.remove_worktree(wt.path) + stats["expired"] += 1 + + # Refresh worktree list after expiration cleanup (use resolved paths) + registered_resolved = {p.resolve() for p in self.get_registered_worktrees()} + worktrees = [ + wt + for wt in self.get_worktree_info() + if wt.path.resolve() in registered_resolved + ] + + # Phase 3: Remove excess worktrees (keep only max_pr_worktrees most recent) + max_pr_worktrees = _get_max_pr_worktrees() + if len(worktrees) > max_pr_worktrees: + # worktrees are already sorted by age (oldest first) + excess_count = len(worktrees) - max_pr_worktrees + for wt in worktrees[:excess_count]: + logger.info( + f"[WorktreeManager] Removing excess worktree: {wt.path.name} (count: {len(worktrees)}, max: {max_pr_worktrees})" + ) + self.remove_worktree(wt.path) + stats["excess"] += 1 + + stats["total"] = stats["orphaned"] + stats["expired"] + stats["excess"] + + if stats["total"] > 0: + logger.info( + f"[WorktreeManager] Cleanup complete: {stats['total']} worktrees removed " + f"(orphaned={stats['orphaned']}, expired={stats['expired']}, excess={stats['excess']})" + ) + else: + logger.debug( + f"No cleanup needed (current: {len(worktrees)}, max: {max_pr_worktrees})" + ) + + return stats + + def cleanup_all_worktrees(self) -> int: + """ + Remove ALL PR worktrees (for testing or emergency cleanup). 
+ + Returns: + Number of worktrees removed + """ + if not self.worktree_base_dir.exists(): + return 0 + + worktrees = self.get_worktree_info() + count = 0 + + for wt in worktrees: + logger.info(f"[WorktreeManager] Removing worktree: {wt.path.name}") + self.remove_worktree(wt.path) + count += 1 + + if count > 0: + try: + subprocess.run( + ["git", "worktree", "prune"], + cwd=self.project_dir, + capture_output=True, + timeout=30, + ) + except subprocess.TimeoutExpired: + logger.warning("Timeout pruning worktrees after cleanup") + logger.info(f"[WorktreeManager] Removed all {count} PR worktrees") + + return count diff --git a/apps/backend/runners/github/services/pydantic_models.py b/apps/backend/runners/github/services/pydantic_models.py index 3c91a219eb..6777e97690 100644 --- a/apps/backend/runners/github/services/pydantic_models.py +++ b/apps/backend/runners/github/services/pydantic_models.py @@ -26,7 +26,7 @@ from typing import Literal -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, Field # ============================================================================= # Common Finding Types @@ -46,6 +46,10 @@ class BaseFinding(BaseModel): line: int = Field(0, description="Line number of the issue") suggested_fix: str | None = Field(None, description="How to fix this issue") fixable: bool = Field(False, description="Whether this can be auto-fixed") + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. Required for validation.", + ) class SecurityFinding(BaseFinding): @@ -78,9 +82,6 @@ class DeepAnalysisFinding(BaseFinding): "performance", "logic", ] = Field(description="Issue category") - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="AI's confidence in this finding (0.0-1.0)" - ) verification_note: str | None = Field( None, description="What evidence is missing or couldn't be verified" ) @@ -315,21 +316,11 @@ class OrchestratorFinding(BaseModel): description="Issue severity level" ) suggestion: str | None = Field(None, description="How to fix this issue") - confidence: float = Field( - 0.85, - ge=0.0, - le=1.0, - description="Confidence (0.0-1.0 or 0-100, normalized to 0.0-1.0)", + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. 
Required for validation.", ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range (accepts 0-100 or 0.0-1.0).""" - if v > 1: - return v / 100.0 - return float(v) - class OrchestratorReviewResponse(BaseModel): """Complete response schema for orchestrator PR review.""" @@ -355,9 +346,6 @@ class LogicFinding(BaseFinding): category: Literal["logic"] = Field( default="logic", description="Always 'logic' for logic findings" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" - ) example_input: str | None = Field( None, description="Concrete input that triggers the bug" ) @@ -366,14 +354,6 @@ class LogicFinding(BaseFinding): None, description="What the code should produce" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class CodebaseFitFinding(BaseFinding): """A codebase fit finding from the codebase fit review agent.""" @@ -381,9 +361,6 @@ class CodebaseFitFinding(BaseFinding): category: Literal["codebase_fit"] = Field( default="codebase_fit", description="Always 'codebase_fit' for fit findings" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" - ) existing_code: str | None = Field( None, description="Reference to existing code that should be used instead" ) @@ -391,14 +368,6 @@ class CodebaseFitFinding(BaseFinding): None, description="Description of the established pattern being violated" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class ParallelOrchestratorFinding(BaseModel): """A finding from the parallel orchestrator with source agent tracking.""" @@ -423,8 +392,9 @@ class ParallelOrchestratorFinding(BaseModel): severity: Literal["critical", "high", "medium", "low"] = Field( description="Issue severity level" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. Required for validation.", ) suggested_fix: str | None = Field(None, description="How to fix this issue") fixable: bool = Field(False, description="Whether this can be auto-fixed") @@ -436,14 +406,6 @@ class ParallelOrchestratorFinding(BaseModel): False, description="Whether multiple agents agreed on this finding" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class AgentAgreement(BaseModel): """Tracks agreement between agents on findings.""" @@ -496,22 +458,14 @@ class ResolutionVerification(BaseModel): status: Literal["resolved", "partially_resolved", "unresolved", "cant_verify"] = ( Field(description="Resolution status after AI verification") ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in the resolution status" + evidence: str = Field( + min_length=1, + description="Actual code snippet showing the resolution status. 
Required.", ) - evidence: str = Field(description="What evidence supports this resolution status") resolution_notes: str | None = Field( None, description="Detailed notes on how the issue was addressed" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class ParallelFollowupFinding(BaseModel): """A finding from parallel follow-up review with source agent tracking.""" @@ -534,8 +488,9 @@ class ParallelFollowupFinding(BaseModel): severity: Literal["critical", "high", "medium", "low"] = Field( description="Issue severity level" ) - confidence: float = Field( - 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)" + evidence: str | None = Field( + None, + description="Actual code snippet proving the issue exists. Required for validation.", ) suggested_fix: str | None = Field(None, description="How to fix this issue") fixable: bool = Field(False, description="Whether this can be auto-fixed") @@ -546,14 +501,6 @@ class ParallelFollowupFinding(BaseModel): None, description="ID of related previous finding if this is a regression" ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range.""" - if v > 1: - return v / 100.0 - return float(v) - class CommentAnalysis(BaseModel): """Analysis of a contributor or AI comment.""" @@ -640,6 +587,9 @@ class FindingValidationResult(BaseModel): The finding-validator agent uses this to report whether a previous finding is a genuine issue or a false positive that should be dismissed. + + EVIDENCE-BASED VALIDATION: No confidence scores - validation is binary. + Either the evidence shows the issue exists, or it doesn't. """ finding_id: str = Field(description="ID of the finding being validated") @@ -648,16 +598,17 @@ class FindingValidationResult(BaseModel): ] = Field( description=( "Validation result: " - "confirmed_valid = issue IS real, keep as unresolved; " - "dismissed_false_positive = original finding was incorrect, remove; " - "needs_human_review = cannot determine with confidence" + "confirmed_valid = code evidence proves issue IS real; " + "dismissed_false_positive = code evidence proves issue does NOT exist; " + "needs_human_review = cannot find definitive evidence either way" ) ) code_evidence: str = Field( min_length=1, description=( "REQUIRED: Exact code snippet examined from the file. " - "Must be actual code, not a description." + "Must be actual code copy-pasted from the file, not a description. " + "This is the proof that determines the validation status." ), ) line_range: tuple[int, int] = Field( @@ -666,27 +617,18 @@ class FindingValidationResult(BaseModel): explanation: str = Field( min_length=20, description=( - "Detailed explanation of why the finding is valid/invalid. " - "Must reference specific code and explain the reasoning." + "Detailed explanation connecting the code_evidence to the validation_status. " + "Must explain: (1) what the original finding claimed, (2) what the actual code shows, " + "(3) why this proves/disproves the issue." ), ) - confidence: float = Field( - ge=0.0, - le=1.0, + evidence_verified_in_file: bool = Field( description=( - "Confidence in the validation result (0.0-1.0). " - "Must be >= 0.80 to dismiss as false positive, >= 0.70 to confirm valid." 
- ), + "True if the code_evidence was verified to exist at the specified line_range. " + "False if the code couldn't be found (indicates hallucination in original finding)." + ) ) - @field_validator("confidence", mode="before") - @classmethod - def normalize_confidence(cls, v: int | float) -> float: - """Normalize confidence to 0.0-1.0 range (accepts 0-100 or 0.0-1.0).""" - if v > 1: - return v / 100.0 - return float(v) - class FindingValidationResponse(BaseModel): """Complete response from the finding-validator agent.""" diff --git a/apps/backend/runners/github/services/response_parsers.py b/apps/backend/runners/github/services/response_parsers.py index db318463d2..2df83ea06b 100644 --- a/apps/backend/runners/github/services/response_parsers.py +++ b/apps/backend/runners/github/services/response_parsers.py @@ -33,8 +33,9 @@ TriageResult, ) -# Confidence threshold for filtering findings (GitHub Copilot standard) -CONFIDENCE_THRESHOLD = 0.80 +# Evidence-based validation replaces confidence scoring +# Findings without evidence are filtered out instead of using confidence thresholds +MIN_EVIDENCE_LENGTH = 20 # Minimum chars for evidence to be considered valid class ResponseParser: @@ -65,9 +66,13 @@ def parse_scan_result(response_text: str) -> dict: @staticmethod def parse_review_findings( - response_text: str, apply_confidence_filter: bool = True + response_text: str, require_evidence: bool = True ) -> list[PRReviewFinding]: - """Parse findings from AI response with optional confidence filtering.""" + """Parse findings from AI response with optional evidence validation. + + Evidence-based validation: Instead of confidence scores, findings + require actual code evidence proving the issue exists. + """ findings = [] try: @@ -77,14 +82,14 @@ def parse_review_findings( if json_match: findings_data = json.loads(json_match.group(1)) for i, f in enumerate(findings_data): - # Get confidence (default to 0.85 if not provided for backward compat) - confidence = float(f.get("confidence", 0.85)) + # Get evidence (code snippet proving the issue) + evidence = f.get("evidence") or f.get("code_snippet") or "" - # Apply confidence threshold filter - if apply_confidence_filter and confidence < CONFIDENCE_THRESHOLD: + # Apply evidence-based validation + if require_evidence and len(evidence.strip()) < MIN_EVIDENCE_LENGTH: print( f"[AI] Dropped finding '{f.get('title', 'unknown')}': " - f"confidence {confidence:.2f} < {CONFIDENCE_THRESHOLD}", + f"insufficient evidence ({len(evidence.strip())} chars < {MIN_EVIDENCE_LENGTH})", flush=True, ) continue @@ -105,8 +110,8 @@ def parse_review_findings( end_line=f.get("end_line"), suggested_fix=f.get("suggested_fix"), fixable=f.get("fixable", False), - # NEW: Support verification and redundancy fields - confidence=confidence, + # Evidence-based validation fields + evidence=evidence if evidence.strip() else None, verification_note=f.get("verification_note"), redundant_with=f.get("redundant_with"), ) diff --git a/apps/backend/runners/github/services/review_tools.py b/apps/backend/runners/github/services/review_tools.py index 881d8353cf..1a53a6b126 100644 --- a/apps/backend/runners/github/services/review_tools.py +++ b/apps/backend/runners/github/services/review_tools.py @@ -140,7 +140,9 @@ async def spawn_security_review( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if 
block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text # Parse findings @@ -223,7 +225,9 @@ async def spawn_quality_review( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text findings = _parse_findings_from_response(result_text, source="quality_agent") @@ -316,7 +320,9 @@ async def spawn_deep_analysis( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text findings = _parse_findings_from_response(result_text, source="deep_analysis") diff --git a/apps/backend/runners/github/services/sdk_utils.py b/apps/backend/runners/github/services/sdk_utils.py index 0e6da74f30..7471f16360 100644 --- a/apps/backend/runners/github/services/sdk_utils.py +++ b/apps/backend/runners/github/services/sdk_utils.py @@ -235,8 +235,9 @@ async def process_sdk_stream( if on_tool_use: on_tool_use(tool_name, tool_id, tool_input) - # Collect text - if hasattr(block, "text"): + # Collect text - must check block type since only TextBlock has .text + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text # Always print text content preview (not just in DEBUG_MODE) text_preview = block.text[:500].replace("\n", " ").strip() diff --git a/apps/backend/runners/github/services/triage_engine.py b/apps/backend/runners/github/services/triage_engine.py index 2508207012..57a6b04310 100644 --- a/apps/backend/runners/github/services/triage_engine.py +++ b/apps/backend/runners/github/services/triage_engine.py @@ -87,7 +87,9 @@ async def triage_single_issue( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text return self.parser.parse_triage_result( diff --git a/apps/backend/runners/gitlab/runner.py b/apps/backend/runners/gitlab/runner.py index c2a0be32a5..d4f61827bb 100644 --- a/apps/backend/runners/gitlab/runner.py +++ b/apps/backend/runners/gitlab/runner.py @@ -26,8 +26,10 @@ # Add backend to path sys.path.insert(0, str(Path(__file__).parent.parent.parent)) -# Load .env file -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent.parent / ".env" if env_file.exists(): diff --git a/apps/backend/runners/gitlab/services/mr_review_engine.py b/apps/backend/runners/gitlab/services/mr_review_engine.py index d1679a4b62..ef8ef9aaf0 100644 --- a/apps/backend/runners/gitlab/services/mr_review_engine.py +++ b/apps/backend/runners/gitlab/services/mr_review_engine.py @@ -234,7 +234,9 @@ async def run_review( msg_type = type(msg).__name__ if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text 
attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): result_text += block.text self._report_progress( diff --git a/apps/backend/runners/ideation_runner.py b/apps/backend/runners/ideation_runner.py index 63714a372f..9b91445601 100644 --- a/apps/backend/runners/ideation_runner.py +++ b/apps/backend/runners/ideation_runner.py @@ -26,8 +26,10 @@ # Add auto-claude to path sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file from auto-claude/ directory -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" if env_file.exists(): @@ -94,8 +96,8 @@ def main(): parser.add_argument( "--model", type=str, - default="claude-opus-4-5-20251101", - help="Model to use (default: claude-opus-4-5-20251101)", + default="sonnet", # Changed from "opus" (fix #433) + help="Model to use (haiku, sonnet, opus, or full model ID)", ) parser.add_argument( "--thinking-level", diff --git a/apps/backend/runners/insights_runner.py b/apps/backend/runners/insights_runner.py index a2de9f9408..bd4bf362c4 100644 --- a/apps/backend/runners/insights_runner.py +++ b/apps/backend/runners/insights_runner.py @@ -15,8 +15,10 @@ # Add auto-claude to path sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file from auto-claude/ directory -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" if env_file.exists(): @@ -39,6 +41,7 @@ debug_section, debug_success, ) +from phase_config import resolve_model_id def load_project_context(project_dir: str) -> str: @@ -132,7 +135,7 @@ async def run_with_sdk( project_dir: str, message: str, history: list, - model: str = "claude-sonnet-4-5-20250929", + model: str = "sonnet", # Shorthand - resolved via API Profile if configured thinking_level: str = "medium", ) -> None: """Run the chat using Claude SDK with streaming.""" @@ -180,7 +183,7 @@ async def run_with_sdk( # Create Claude SDK client with appropriate settings for insights client = ClaudeSDKClient( options=ClaudeAgentOptions( - model=model, # Use configured model + model=resolve_model_id(model), # Resolve via API Profile if configured system_prompt=system_prompt, allowed_tools=[ "Read", @@ -336,8 +339,8 @@ def main(): ) parser.add_argument( "--model", - default="claude-sonnet-4-5-20250929", - help="Claude model ID (default: claude-sonnet-4-5-20250929)", + default="sonnet", + help="Model to use (haiku, sonnet, opus, or full model ID)", ) parser.add_argument( "--thinking-level", diff --git a/apps/backend/runners/roadmap/models.py b/apps/backend/runners/roadmap/models.py index cc7a1f5f8b..377f5cfacc 100644 --- a/apps/backend/runners/roadmap/models.py +++ b/apps/backend/runners/roadmap/models.py @@ -23,6 +23,6 @@ class RoadmapConfig: project_dir: Path output_dir: Path - model: str = "claude-opus-4-5-20251101" + model: str = "sonnet" # Changed from "opus" (fix #433) refresh: bool = False # Force regeneration even if roadmap exists enable_competitor_analysis: bool = False # Enable competitor analysis phase diff --git a/apps/backend/runners/roadmap/orchestrator.py b/apps/backend/runners/roadmap/orchestrator.py index b7a9803af1..b49ca2c1cb 100644 --- a/apps/backend/runners/roadmap/orchestrator.py +++ b/apps/backend/runners/roadmap/orchestrator.py @@ -27,7 +27,7 @@ def __init__( 
self, project_dir: Path, output_dir: Path | None = None, - model: str = "claude-opus-4-5-20251101", + model: str = "sonnet", # Changed from "opus" (fix #433) thinking_level: str = "medium", refresh: bool = False, enable_competitor_analysis: bool = False, diff --git a/apps/backend/runners/roadmap_runner.py b/apps/backend/runners/roadmap_runner.py index 88f157b12c..06625add7e 100644 --- a/apps/backend/runners/roadmap_runner.py +++ b/apps/backend/runners/roadmap_runner.py @@ -20,8 +20,10 @@ # Add auto-claude to path sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file from auto-claude/ directory -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" if env_file.exists(): @@ -55,8 +57,8 @@ def main(): parser.add_argument( "--model", type=str, - default="claude-opus-4-5-20251101", - help="Model to use (default: claude-opus-4-5-20251101)", + default="sonnet", # Changed from "opus" (fix #433) + help="Model to use (haiku, sonnet, opus, or full model ID)", ) parser.add_argument( "--thinking-level", diff --git a/apps/backend/runners/spec_runner.py b/apps/backend/runners/spec_runner.py index 0bda6db115..30adbf3fa6 100644 --- a/apps/backend/runners/spec_runner.py +++ b/apps/backend/runners/spec_runner.py @@ -26,11 +26,11 @@ - Risk factors and edge cases Usage: - python auto-claude/spec_runner.py --task "Add user authentication" - python auto-claude/spec_runner.py --interactive - python auto-claude/spec_runner.py --continue 001-feature - python auto-claude/spec_runner.py --task "Fix button color" --complexity simple - python auto-claude/spec_runner.py --task "Simple fix" --no-ai-assessment + python runners/spec_runner.py --task "Add user authentication" + python runners/spec_runner.py --interactive + python runners/spec_runner.py --continue 001-feature + python runners/spec_runner.py --task "Fix button color" --complexity simple + python runners/spec_runner.py --task "Simple fix" --no-ai-assessment """ import sys @@ -81,8 +81,10 @@ # Add auto-claude to path (parent of runners/) sys.path.insert(0, str(Path(__file__).parent.parent)) -# Load .env file -from dotenv import load_dotenv +# Load .env file with centralized error handling +from cli.utils import import_dotenv + +load_dotenv = import_dotenv() env_file = Path(__file__).parent.parent / ".env" dev_env_file = Path(__file__).parent.parent.parent / "dev" / "auto-claude" / ".env" @@ -198,9 +200,21 @@ def main(): default=None, help="Base branch for creating worktrees (default: auto-detect or current branch)", ) + parser.add_argument( + "--direct", + action="store_true", + help="Build directly in project without worktree isolation (default: use isolated worktree)", + ) args = parser.parse_args() + # Warn user about direct mode risks + if args.direct: + print_status( + "Direct mode: Building in project directory without worktree isolation", + "warning", + ) + # Handle task from file if provided task_description = args.task if args.task_file: @@ -328,6 +342,10 @@ def main(): if args.base_branch: run_cmd.extend(["--base-branch", args.base_branch]) + # Pass --direct flag if specified (skip worktree isolation) + if args.direct: + run_cmd.append("--direct") + # Note: Model configuration for subsequent phases (planning, coding, qa) # is read from task_metadata.json by run.py, so we don't pass it here. # This allows per-phase configuration when using Auto profile. 
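The model-default changes above all follow the same pattern: a runner accepts a shorthand ("haiku", "sonnet", "opus") or a full model ID and resolves it through resolve_model_id() immediately before constructing the SDK client, so a configured API Profile can substitute its own model per phase. A minimal sketch of that wiring, using only the helpers visible in this diff; the argument parsing and system prompt are illustrative placeholders, and the SDK import path is an assumption:

import argparse

from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient  # assumed import path
from phase_config import resolve_model_id

parser = argparse.ArgumentParser()
parser.add_argument(
    "--model",
    default="sonnet",
    help="Model to use (haiku, sonnet, opus, or full model ID)",
)
args = parser.parse_args()

# Shorthands are resolved once, here; full model IDs are expected to pass
# through unchanged (an assumption about resolve_model_id, not verified).
client = ClaudeSDKClient(
    options=ClaudeAgentOptions(
        model=resolve_model_id(args.model),
        system_prompt="Example system prompt",  # placeholder
    )
)

This mirrors the insights_runner.py hunk; the roadmap and ideation runners only swap their argparse defaults and help text.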
diff --git a/apps/backend/security/__init__.py b/apps/backend/security/__init__.py index 9b389373b6..b26311d292 100644 --- a/apps/backend/security/__init__.py +++ b/apps/backend/security/__init__.py @@ -62,7 +62,9 @@ validate_chmod_command, validate_dropdb_command, validate_dropuser_command, + validate_git_command, validate_git_commit, + validate_git_config, validate_init_script, validate_kill_command, validate_killall_command, @@ -93,7 +95,9 @@ "validate_chmod_command", "validate_rm_command", "validate_init_script", + "validate_git_command", "validate_git_commit", + "validate_git_config", "validate_dropdb_command", "validate_dropuser_command", "validate_psql_command", diff --git a/apps/backend/security/constants.py b/apps/backend/security/constants.py new file mode 100644 index 0000000000..3ddbca3002 --- /dev/null +++ b/apps/backend/security/constants.py @@ -0,0 +1,16 @@ +""" +Security Constants +================== + +Shared constants for the security module. +""" + +# Environment variable name for the project directory +# Set by agents (coder.py, loop.py) at startup to ensure security hooks +# can find the correct project directory even in worktree mode. +PROJECT_DIR_ENV_VAR = "AUTO_CLAUDE_PROJECT_DIR" + +# Security configuration filenames +# These are the files that control which commands are allowed to run. +ALLOWLIST_FILENAME = ".auto-claude-allowlist" +PROFILE_FILENAME = ".auto-claude-security.json" diff --git a/apps/backend/security/git_validators.py b/apps/backend/security/git_validators.py index 5a75ad39f1..5c21d32909 100644 --- a/apps/backend/security/git_validators.py +++ b/apps/backend/security/git_validators.py @@ -2,7 +2,9 @@ Git Validators ============== -Validators for git operations (commit with secret scanning). +Validators for git operations: +- Commit with secret scanning +- Config protection (prevent setting test users) """ import shlex @@ -10,8 +12,203 @@ from .validation_models import ValidationResult +# ============================================================================= +# BLOCKED GIT CONFIG PATTERNS +# ============================================================================= -def validate_git_commit(command_string: str) -> ValidationResult: +# Git config keys that agents must NOT modify +# These are identity settings that should inherit from the user's global config +# +# NOTE: This validation covers command-line arguments (git config, git -c). +# Environment variables (GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, GIT_COMMITTER_NAME, +# GIT_COMMITTER_EMAIL) are NOT validated here as they require pre-execution +# environment filtering, which is handled at the sandbox/hook level. +BLOCKED_GIT_CONFIG_KEYS = { + "user.name", + "user.email", + "author.name", + "author.email", + "committer.name", + "committer.email", +} + + +def validate_git_config(command_string: str) -> ValidationResult: + """ + Validate git config commands - block identity changes. + + Agents should not set user.name, user.email, etc. as this: + 1. Breaks commit attribution + 2. Can create fake "Test User" identities + 3. 
Overrides the user's legitimate git identity + + Args: + command_string: The full git command string + + Returns: + Tuple of (is_valid, error_message) + """ + try: + tokens = shlex.split(command_string) + except ValueError: + return False, "Could not parse git command" # Fail closed on parse errors + + if len(tokens) < 2 or tokens[0] != "git" or tokens[1] != "config": + return True, "" # Not a git config command + + # Check for read-only operations first - these are always allowed + # --get, --get-all, --get-regexp, --list are all read operations + read_only_flags = {"--get", "--get-all", "--get-regexp", "--list", "-l"} + for token in tokens[2:]: + if token in read_only_flags: + return True, "" # Read operation, allow it + + # Extract the config key from the command + # git config [options] [value] - key is typically after config and any options + config_key = None + for token in tokens[2:]: + # Skip options (start with -) + if token.startswith("-"): + continue + # First non-option token is the config key + config_key = token.lower() + break + + if not config_key: + return True, "" # No config key specified (e.g., git config --list) + + # Check if the exact config key is blocked + for blocked_key in BLOCKED_GIT_CONFIG_KEYS: + if config_key == blocked_key: + return False, ( + f"BLOCKED: Cannot modify git identity configuration\n\n" + f"You attempted to set '{blocked_key}' which is not allowed.\n\n" + f"WHY: Git identity (user.name, user.email) must inherit from the user's " + f"global git configuration. Setting fake identities like 'Test User' breaks " + f"commit attribution and causes serious issues.\n\n" + f"WHAT TO DO: Simply commit without setting any user configuration. " + f"The repository will use the correct identity automatically." + ) + + return True, "" + + +def validate_git_inline_config(tokens: list[str]) -> ValidationResult: + """ + Check for blocked config keys passed via git -c flag. + + Git allows inline config with: git -c key=value + This bypasses 'git config' validation, so we must check all git commands + for -c flags containing blocked identity keys. + + Args: + tokens: Parsed command tokens + + Returns: + Tuple of (is_valid, error_message) + """ + i = 1 # Start after 'git' + while i < len(tokens): + token = tokens[i] + + # Check for -c flag (can be "-c key=value" or "-c" "key=value") + if token == "-c": + # Next token should be the key=value + if i + 1 < len(tokens): + config_pair = tokens[i + 1] + # Extract the key from key=value + if "=" in config_pair: + config_key = config_pair.split("=", 1)[0].lower() + if config_key in BLOCKED_GIT_CONFIG_KEYS: + return False, ( + f"BLOCKED: Cannot set git identity via -c flag\n\n" + f"You attempted to use '-c {config_pair}' which sets a blocked " + f"identity configuration.\n\n" + f"WHY: Git identity (user.name, user.email) must inherit from the " + f"user's global git configuration. Setting fake identities breaks " + f"commit attribution and causes serious issues.\n\n" + f"WHAT TO DO: Remove the -c flag and commit normally. " + f"The repository will use the correct identity automatically." 
+ ) + i += 2 # Skip -c and its value + continue + elif token.startswith("-c"): + # Handle -ckey=value format (no space) + config_pair = token[2:] # Remove "-c" prefix + if "=" in config_pair: + config_key = config_pair.split("=", 1)[0].lower() + if config_key in BLOCKED_GIT_CONFIG_KEYS: + return False, ( + f"BLOCKED: Cannot set git identity via -c flag\n\n" + f"You attempted to use '{token}' which sets a blocked " + f"identity configuration.\n\n" + f"WHY: Git identity (user.name, user.email) must inherit from the " + f"user's global git configuration. Setting fake identities breaks " + f"commit attribution and causes serious issues.\n\n" + f"WHAT TO DO: Remove the -c flag and commit normally. " + f"The repository will use the correct identity automatically." + ) + + i += 1 + + return True, "" + + +def validate_git_command(command_string: str) -> ValidationResult: + """ + Main git validator that checks all git security rules. + + Currently validates: + - git -c: Block identity changes via inline config on ANY git command + - git config: Block identity changes + - git commit: Run secret scanning + + Args: + command_string: The full git command string + + Returns: + Tuple of (is_valid, error_message) + """ + try: + tokens = shlex.split(command_string) + except ValueError: + return False, "Could not parse git command" + + if not tokens or tokens[0] != "git": + return True, "" + + if len(tokens) < 2: + return True, "" # Just "git" with no subcommand + + # Check for blocked -c flags on ANY git command (security bypass prevention) + is_valid, error_msg = validate_git_inline_config(tokens) + if not is_valid: + return is_valid, error_msg + + # Find the actual subcommand (skip global options like -c, -C, --git-dir, etc.) + subcommand = None + for token in tokens[1:]: + # Skip options and their values + if token.startswith("-"): + continue + subcommand = token + break + + if not subcommand: + return True, "" # No subcommand found + + # Check git config commands + if subcommand == "config": + return validate_git_config(command_string) + + # Check git commit commands (secret scanning) + if subcommand == "commit": + return validate_git_commit_secrets(command_string) + + return True, "" + + +def validate_git_commit_secrets(command_string: str) -> ValidationResult: """ Validate git commit commands - run secret scan before allowing commit. @@ -99,3 +296,8 @@ def validate_git_commit(command_string: str) -> ValidationResult: ) return False, "\n".join(error_lines) + + +# Backwards compatibility alias - the registry uses this name +# Now delegates to the comprehensive validator +validate_git_commit = validate_git_command diff --git a/apps/backend/security/hooks.py b/apps/backend/security/hooks.py index 35152d4433..4bc7328d3a 100644 --- a/apps/backend/security/hooks.py +++ b/apps/backend/security/hooks.py @@ -66,10 +66,20 @@ async def bash_security_hook( return {} # Get the working directory from context or use current directory - # In the actual client, this would be set by the ClaudeSDKClient - cwd = os.getcwd() - if context and hasattr(context, "cwd"): + # Priority: + # 1. Environment variable PROJECT_DIR_ENV_VAR (set by agent on startup) + # 2. input_data cwd (passed by SDK in the tool call) + # 3. Context cwd (should be set by ClaudeSDKClient but sometimes isn't) + # 4. 
Current working directory (fallback, may be incorrect in worktree mode) + from .constants import PROJECT_DIR_ENV_VAR + + cwd = os.environ.get(PROJECT_DIR_ENV_VAR) + if not cwd: + cwd = input_data.get("cwd") + if not cwd and context and hasattr(context, "cwd"): cwd = context.cwd + if not cwd: + cwd = os.getcwd() # Get or create security profile # Note: In actual use, spec_dir would be passed through context diff --git a/apps/backend/security/parser.py b/apps/backend/security/parser.py index 1b8ead069a..1c51999866 100644 --- a/apps/backend/security/parser.py +++ b/apps/backend/security/parser.py @@ -4,11 +4,137 @@ Functions for parsing and extracting commands from shell command strings. Handles compound commands, pipes, subshells, and various shell constructs. + +Windows Compatibility Note: +-------------------------- +On Windows, commands containing paths with backslashes can cause shlex.split() +to fail (e.g., incomplete commands with unclosed quotes). This module includes +a fallback parser that extracts command names even from malformed commands, +ensuring security validation can still proceed. """ -import os import re import shlex +from pathlib import PurePosixPath, PureWindowsPath + + +def _cross_platform_basename(path: str) -> str: + """ + Extract the basename from a path in a cross-platform way. + + Handles both Windows paths (C:\\dir\\cmd.exe) and POSIX paths (/dir/cmd) + regardless of the current platform. This is critical for running tests + on Linux CI while handling Windows-style paths. + + Args: + path: A file path string (Windows or POSIX format) + + Returns: + The basename of the path (e.g., "python.exe" from "C:\\Python312\\python.exe") + """ + # Strip surrounding quotes if present + path = path.strip("'\"") + + # Check if this looks like a Windows path (contains backslash or drive letter) + if "\\" in path or (len(path) >= 2 and path[1] == ":"): + # Use PureWindowsPath to handle Windows paths on any platform + return PureWindowsPath(path).name + + # For POSIX paths or simple command names, use PurePosixPath + # (os.path.basename works but PurePosixPath is more explicit) + return PurePosixPath(path).name + + +def _fallback_extract_commands(command_string: str) -> list[str]: + """ + Fallback command extraction when shlex.split() fails. + + Uses regex to extract command names from potentially malformed commands. + This is more permissive than shlex but ensures we can at least identify + the commands being executed for security validation. 
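+
+    For example (illustrative), a command like
+        C:\\Python312\\python.exe build.py && npm test
+    would have its backslashes swallowed by shlex's POSIX-mode escaping,
+    but this fallback still reduces it to ["python", "npm"] for validation.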
+ + Args: + command_string: The command string to parse + + Returns: + List of command names extracted from the string + """ + commands = [] + + # Shell keywords to skip + shell_keywords = { + "if", + "then", + "else", + "elif", + "fi", + "for", + "while", + "until", + "do", + "done", + "case", + "esac", + "in", + "function", + } + + # First, split by common shell operators + # This regex splits on &&, ||, |, ; while being careful about quotes + # We're being permissive here since shlex already failed + parts = re.split(r"\s*(?:&&|\|\||\|)\s*|;\s*", command_string) + + for part in parts: + part = part.strip() + if not part: + continue + + # Skip variable assignments at the start (VAR=value cmd) + while re.match(r"^[A-Za-z_][A-Za-z0-9_]*=\S*\s+", part): + part = re.sub(r"^[A-Za-z_][A-Za-z0-9_]*=\S*\s+", "", part) + + if not part: + continue + + # Strategy: Extract command from the BEGINNING of the part + # Handle various formats: + # - Simple: python3, npm, git + # - Unix path: /usr/bin/python + # - Windows path: C:\Python312\python.exe + # - Quoted with spaces: "C:\Program Files\python.exe" + + # Extract first token, handling quoted strings with spaces + first_token_match = re.match(r'^(?:"([^"]+)"|\'([^\']+)\'|([^\s]+))', part) + if not first_token_match: + continue + + # Pick whichever capture group matched (double-quoted, single-quoted, or unquoted) + first_token = ( + first_token_match.group(1) + or first_token_match.group(2) + or first_token_match.group(3) + ) + + # Now extract just the command name from this token + # Handle Windows paths (C:\dir\cmd.exe) and Unix paths (/dir/cmd) + # Use cross-platform basename for reliable path handling on any OS + cmd = _cross_platform_basename(first_token) + + # Remove Windows extensions + cmd = re.sub(r"\.(exe|cmd|bat|ps1|sh)$", "", cmd, flags=re.IGNORECASE) + + # Clean up any remaining quotes or special chars at the start + cmd = re.sub(r'^["\'\\/]+', "", cmd) + + # Skip tokens that look like function calls or code fragments (not shell commands) + # These appear when splitting on semicolons inside malformed quoted strings + if "(" in cmd or ")" in cmd or "." in cmd: + continue + + if cmd and cmd.lower() not in shell_keywords: + commands.append(cmd) + + return commands def split_command_segments(command_string: str) -> list[str]: @@ -32,13 +158,46 @@ def split_command_segments(command_string: str) -> list[str]: return result +def _contains_windows_path(command_string: str) -> bool: + """ + Check if a command string contains Windows-style paths. + + Windows paths with backslashes cause issues with shlex.split() because + backslashes are interpreted as escape characters in POSIX mode. + + Args: + command_string: The command string to check + + Returns: + True if Windows paths are detected + """ + # Pattern matches: + # - Drive letter paths: C:\, D:\, etc. + # - Backslash followed by a path component (2+ chars to avoid escape sequences like \n, \t) + # The second char must be alphanumeric, underscore, or another path separator + # This avoids false positives on escape sequences which are single-char after backslash + return bool(re.search(r"[A-Za-z]:\\|\\[A-Za-z][A-Za-z0-9_\\/]", command_string)) + + def extract_commands(command_string: str) -> list[str]: """ Extract command names from a shell command string. Handles pipes, command chaining (&&, ||, ;), and subshells. Returns the base command names (without paths). 
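+
+    For example (illustrative), extract_commands("npm test && git commit")
+    returns ["npm", "git"].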
+ + On Windows or when commands contain malformed quoting (common with + Windows paths in bash-style commands), falls back to regex-based + extraction to ensure security validation can proceed. """ + # If command contains Windows paths, use fallback parser directly + # because shlex.split() interprets backslashes as escape characters + if _contains_windows_path(command_string): + fallback_commands = _fallback_extract_commands(command_string) + if fallback_commands: + return fallback_commands + # Continue with shlex if fallback found nothing + commands = [] # Split on semicolons that aren't inside quotes @@ -53,7 +212,12 @@ def extract_commands(command_string: str) -> list[str]: tokens = shlex.split(segment) except ValueError: # Malformed command (unclosed quotes, etc.) - # Return empty to trigger block (fail-safe) + # This is common on Windows with backslash paths in quoted strings + # Use fallback parser instead of blocking + fallback_commands = _fallback_extract_commands(command_string) + if fallback_commands: + return fallback_commands + # If fallback also found nothing, return empty to trigger block return [] if not tokens: @@ -106,7 +270,8 @@ def extract_commands(command_string: str) -> list[str]: if expect_command: # Extract the base command name (handle paths like /usr/bin/python) - cmd = os.path.basename(token) + # Use cross-platform basename for Windows paths on Linux CI + cmd = _cross_platform_basename(token) commands.append(cmd) expect_command = False diff --git a/apps/backend/security/profile.py b/apps/backend/security/profile.py index da75cff174..a3087a65bb 100644 --- a/apps/backend/security/profile.py +++ b/apps/backend/security/profile.py @@ -9,11 +9,12 @@ from pathlib import Path from project_analyzer import ( - ProjectAnalyzer, SecurityProfile, get_or_create_profile, ) +from .constants import ALLOWLIST_FILENAME, PROFILE_FILENAME + # ============================================================================= # GLOBAL STATE # ============================================================================= @@ -23,18 +24,33 @@ _cached_project_dir: Path | None = None _cached_spec_dir: Path | None = None # Track spec directory for cache key _cached_profile_mtime: float | None = None # Track file modification time +_cached_allowlist_mtime: float | None = None # Track allowlist modification time def _get_profile_path(project_dir: Path) -> Path: """Get the security profile file path for a project.""" - return project_dir / ProjectAnalyzer.PROFILE_FILENAME + return project_dir / PROFILE_FILENAME + + +def _get_allowlist_path(project_dir: Path) -> Path: + """Get the allowlist file path for a project.""" + return project_dir / ALLOWLIST_FILENAME def _get_profile_mtime(project_dir: Path) -> float | None: """Get the modification time of the security profile file, or None if not exists.""" profile_path = _get_profile_path(project_dir) try: - return profile_path.stat().st_mtime if profile_path.exists() else None + return profile_path.stat().st_mtime + except OSError: + return None + + +def _get_allowlist_mtime(project_dir: Path) -> float | None: + """Get the modification time of the allowlist file, or None if not exists.""" + allowlist_path = _get_allowlist_path(project_dir) + try: + return allowlist_path.stat().st_mtime except OSError: return None @@ -49,6 +65,7 @@ def get_security_profile( - The project directory changes - The security profile file is created (was None, now exists) - The security profile file is modified (mtime changed) + - The allowlist file is created, modified, or 
deleted Args: project_dir: Project root directory @@ -57,7 +74,11 @@ def get_security_profile( Returns: SecurityProfile for the project """ - global _cached_profile, _cached_project_dir, _cached_spec_dir, _cached_profile_mtime + global _cached_profile + global _cached_project_dir + global _cached_spec_dir + global _cached_profile_mtime + global _cached_allowlist_mtime project_dir = Path(project_dir).resolve() resolved_spec_dir = Path(spec_dir).resolve() if spec_dir else None @@ -68,30 +89,40 @@ def get_security_profile( and _cached_project_dir == project_dir and _cached_spec_dir == resolved_spec_dir ): - # Check if file has been created or modified since caching - current_mtime = _get_profile_mtime(project_dir) - # Cache is valid if: - # - Both are None (file never existed and still doesn't) - # - Both have same mtime (file unchanged) - if current_mtime == _cached_profile_mtime: + # Check if files have been created or modified since caching + current_profile_mtime = _get_profile_mtime(project_dir) + current_allowlist_mtime = _get_allowlist_mtime(project_dir) + + # Cache is valid if both mtimes are unchanged + if ( + current_profile_mtime == _cached_profile_mtime + and current_allowlist_mtime == _cached_allowlist_mtime + ): return _cached_profile - # File was created or modified - invalidate cache - # (This happens when analyzer creates the file after agent starts) + # File was created, modified, or deleted - invalidate cache + # (This happens when analyzer creates the file after agent starts, + # or when user adds/updates the allowlist) # Analyze and cache _cached_profile = get_or_create_profile(project_dir, spec_dir) _cached_project_dir = project_dir _cached_spec_dir = resolved_spec_dir _cached_profile_mtime = _get_profile_mtime(project_dir) + _cached_allowlist_mtime = _get_allowlist_mtime(project_dir) return _cached_profile def reset_profile_cache() -> None: """Reset the cached profile (useful for testing or re-analysis).""" - global _cached_profile, _cached_project_dir, _cached_spec_dir, _cached_profile_mtime + global _cached_profile + global _cached_project_dir + global _cached_spec_dir + global _cached_profile_mtime + global _cached_allowlist_mtime _cached_profile = None _cached_project_dir = None _cached_spec_dir = None _cached_profile_mtime = None + _cached_allowlist_mtime = None diff --git a/apps/backend/security/validator.py b/apps/backend/security/validator.py index 7727f012fa..c1ca28983a 100644 --- a/apps/backend/security/validator.py +++ b/apps/backend/security/validator.py @@ -33,7 +33,11 @@ validate_init_script, validate_rm_command, ) -from .git_validators import validate_git_commit +from .git_validators import ( + validate_git_command, + validate_git_commit, + validate_git_config, +) from .process_validators import ( validate_kill_command, validate_killall_command, @@ -60,6 +64,8 @@ "validate_init_script", # Git validators "validate_git_commit", + "validate_git_command", + "validate_git_config", # Database validators "validate_dropdb_command", "validate_dropuser_command", diff --git a/apps/backend/spec/compaction.py b/apps/backend/spec/compaction.py index d74b377ce2..9538585ec3 100644 --- a/apps/backend/spec/compaction.py +++ b/apps/backend/spec/compaction.py @@ -16,7 +16,7 @@ async def summarize_phase_output( phase_name: str, phase_output: str, - model: str = "claude-sonnet-4-5-20250929", + model: str = "sonnet", # Shorthand - resolved via API Profile if configured target_words: int = 500, ) -> str: """ @@ -73,9 +73,12 @@ async def summarize_phase_output( await 
client.query(prompt) response_text = "" async for msg in client.receive_response(): - if hasattr(msg, "content"): + msg_type = type(msg).__name__ + if msg_type == "AssistantMessage" and hasattr(msg, "content"): for block in msg.content: - if hasattr(block, "text"): + # Must check block type - only TextBlock has .text attribute + block_type = type(block).__name__ + if block_type == "TextBlock" and hasattr(block, "text"): response_text += block.text return response_text.strip() except Exception as e: diff --git a/apps/backend/spec/pipeline/orchestrator.py b/apps/backend/spec/pipeline/orchestrator.py index 76c04d4719..3396f905bd 100644 --- a/apps/backend/spec/pipeline/orchestrator.py +++ b/apps/backend/spec/pipeline/orchestrator.py @@ -57,7 +57,7 @@ def __init__( spec_name: str | None = None, spec_dir: Path | None = None, # Use existing spec directory (for UI integration) - model: str = "claude-sonnet-4-5-20250929", + model: str = "sonnet", # Shorthand - resolved via API Profile if configured thinking_level: str = "medium", # Thinking level for extended thinking complexity_override: str | None = None, # Force a specific complexity use_ai_assessment: bool = True, # Use AI for complexity assessment (vs heuristics) @@ -173,10 +173,11 @@ async def _store_phase_summary(self, phase_name: str) -> None: return # Summarize the output + # Use sonnet shorthand - will resolve via API Profile if configured summary = await summarize_phase_output( phase_name, phase_output, - model="claude-sonnet-4-5-20250929", # Use Sonnet for efficiency + model="sonnet", target_words=500, ) diff --git a/apps/backend/task_logger/capture.py b/apps/backend/task_logger/capture.py index 346011e20f..f96d893f49 100644 --- a/apps/backend/task_logger/capture.py +++ b/apps/backend/task_logger/capture.py @@ -88,17 +88,20 @@ def process_message( inp = block.input if isinstance(inp, dict): # Extract meaningful input description + # Increased limits to avoid hiding critical information if "pattern" in inp: tool_input = f"pattern: {inp['pattern']}" elif "file_path" in inp: fp = inp["file_path"] - if len(fp) > 50: - fp = "..." + fp[-47:] + # Show last 200 chars for paths (enough for most file paths) + if len(fp) > 200: + fp = "..." + fp[-197:] tool_input = fp elif "command" in inp: cmd = inp["command"] - if len(cmd) > 50: - cmd = cmd[:47] + "..." + # Show first 300 chars for commands (enough for most commands) + if len(cmd) > 300: + cmd = cmd[:297] + "..." tool_input = cmd elif "path" in inp: tool_input = inp["path"] diff --git a/apps/backend/task_logger/logger.py b/apps/backend/task_logger/logger.py index 884bb90cea..954814464c 100644 --- a/apps/backend/task_logger/logger.py +++ b/apps/backend/task_logger/logger.py @@ -406,10 +406,10 @@ def tool_start( """ phase_key = (phase or self.current_phase or LogPhase.CODING).value - # Truncate long inputs for display + # Truncate long inputs for display (increased limit to avoid hiding critical info) display_input = tool_input - if display_input and len(display_input) > 100: - display_input = display_input[:97] + "..." + if display_input and len(display_input) > 300: + display_input = display_input[:297] + "..." 
entry = LogEntry( timestamp=self._timestamp(), @@ -462,10 +462,10 @@ def tool_end( """ phase_key = (phase or self.current_phase or LogPhase.CODING).value - # Truncate long results for display + # Truncate long results for display (increased limit to avoid hiding critical info) display_result = result - if display_result and len(display_result) > 100: - display_result = display_result[:97] + "..." + if display_result and len(display_result) > 300: + display_result = display_result[:297] + "..." status = "Done" if success else "Error" content = f"[{tool_name}] {status}" diff --git a/apps/backend/ui/boxes.py b/apps/backend/ui/boxes.py index 317c4a913f..27921ed29f 100644 --- a/apps/backend/ui/boxes.py +++ b/apps/backend/ui/boxes.py @@ -95,11 +95,54 @@ def box( for line in content: # Strip ANSI for length calculation visible_line = re.sub(r"\033\[[0-9;]*m", "", line) - padding = inner_width - len(visible_line) - 2 # -2 for padding spaces + visible_len = len(visible_line) + padding = inner_width - visible_len - 2 # -2 for padding spaces + if padding < 0: - # Truncate if too long - line = line[: inner_width - 5] + "..." - padding = 0 + # Line is too long - need to truncate intelligently + # Calculate how much to remove (visible characters only) + chars_to_remove = abs(padding) + 3 # +3 for "..." + target_len = visible_len - chars_to_remove + + if target_len <= 0: + # Line is way too long, just show "..." + line = "..." + padding = inner_width - 5 # 3 for "..." + 2 for padding + else: + # Truncate the visible text, preserving ANSI codes for what remains + # Split line into segments (ANSI code vs text) + segments = re.split(r"(\033\[[0-9;]*m)", line) + visible_chars = 0 + result_segments = [] + + for segment in segments: + if re.match(r"\033\[[0-9;]*m", segment): + # ANSI code - include it without counting + result_segments.append(segment) + else: + # Text segment - count visible characters + remaining_space = target_len - visible_chars + if remaining_space <= 0: + break + if len(segment) <= remaining_space: + result_segments.append(segment) + visible_chars += len(segment) + else: + # Truncate this segment at word boundary if possible + truncated = segment[:remaining_space] + # Try to truncate at last space to avoid mid-word cuts + last_space = truncated.rfind(" ") + if ( + last_space > remaining_space * 0.7 + ): # Only if space is in last 30% + truncated = truncated[:last_space] + result_segments.append(truncated) + visible_chars += len(truncated) + break + + line = "".join(result_segments) + "..." + padding = 0 + lines.append(v + " " + line + " " * (padding + 1) + v) # Bottom border diff --git a/apps/backend/ui/capabilities.py b/apps/backend/ui/capabilities.py index ac8de510d0..26390abbf5 100644 --- a/apps/backend/ui/capabilities.py +++ b/apps/backend/ui/capabilities.py @@ -13,6 +13,61 @@ import sys +def enable_windows_ansi_support() -> bool: + """ + Enable ANSI escape sequence support on Windows. + + Windows 10 (build 10586+) supports ANSI escape sequences natively, + but they must be explicitly enabled via the Windows API. 
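+
+    The call is safe to repeat: on non-Windows platforms it is a no-op that
+    returns True, and on Windows it only adds the virtual-terminal flag when
+    it is not already set.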
+ + Returns: + True if ANSI support was enabled, False otherwise + """ + if sys.platform != "win32": + return True # Non-Windows always has ANSI support + + try: + import ctypes + from ctypes import wintypes + + # Windows constants + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + + kernel32 = ctypes.windll.kernel32 + + # Get handles + for handle_id in (STD_OUTPUT_HANDLE, STD_ERROR_HANDLE): + handle = kernel32.GetStdHandle(handle_id) + if handle == -1: + continue + + # Get current console mode + mode = wintypes.DWORD() + if not kernel32.GetConsoleMode(handle, ctypes.byref(mode)): + continue + + # Enable ANSI support if not already enabled + if not (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING): + kernel32.SetConsoleMode( + handle, mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING + ) + + return True + except (ImportError, AttributeError, OSError): + # Fall back to colorama if available + try: + import colorama + + colorama.init() + return True + except ImportError: + pass + + return False + + def configure_safe_encoding() -> None: """ Configure stdout/stderr to handle Unicode safely on Windows. @@ -54,8 +109,9 @@ def configure_safe_encoding() -> None: pass -# Configure safe encoding on module import +# Configure safe encoding and ANSI support on module import configure_safe_encoding() +WINDOWS_ANSI_ENABLED = enable_windows_ansi_support() def _is_fancy_ui_enabled() -> bool: diff --git a/apps/frontend/.env.example b/apps/frontend/.env.example index f01b56f27a..d5d246749d 100644 --- a/apps/frontend/.env.example +++ b/apps/frontend/.env.example @@ -19,6 +19,34 @@ # Shows detailed information about app update checks and downloads # DEBUG_UPDATER=true +# ============================================ +# SENTRY ERROR REPORTING +# ============================================ + +# Sentry DSN for anonymous error reporting +# If not set, error reporting is completely disabled (safe for forks) +# +# For official builds: Set in CI/CD secrets +# For local testing: Uncomment and add your DSN +# +# SENTRY_DSN=https://your-dsn@sentry.io/project-id + +# Force enable Sentry in development mode (normally disabled in dev) +# Only works when SENTRY_DSN is also set +# SENTRY_DEV=true + +# Trace sample rate for performance monitoring (0.0 to 1.0) +# Controls what percentage of transactions are sampled +# Default: 0.1 (10%) in production, 0 in development +# Set to 0 to disable performance monitoring entirely +# SENTRY_TRACES_SAMPLE_RATE=0.1 + +# Profile sample rate for profiling (0.0 to 1.0) +# Controls what percentage of sampled transactions include profiling data +# Default: 0.1 (10%) in production, 0 in development +# Set to 0 to disable profiling entirely +# SENTRY_PROFILES_SAMPLE_RATE=0.1 + # ============================================ # HOW TO USE # ============================================ diff --git a/apps/frontend/package-lock.json b/apps/frontend/package-lock.json index 9abc6c3090..e81abc2d9b 100644 --- a/apps/frontend/package-lock.json +++ b/apps/frontend/package-lock.json @@ -32,38 +32,38 @@ "@radix-ui/react-tooltip": "^1.2.8", "@tailwindcss/typography": "^0.5.19", "@tanstack/react-virtual": "^3.13.13", - "@xterm/addon-fit": "^0.11.0", - "@xterm/addon-serialize": "^0.14.0", - "@xterm/addon-web-links": "^0.12.0", - "@xterm/addon-webgl": "^0.19.0", - "@xterm/xterm": "^6.0.0", + "@xterm/addon-fit": "^0.10.0", + "@xterm/addon-serialize": "^0.13.0", + "@xterm/addon-web-links": "^0.11.0", + "@xterm/addon-webgl": "^0.18.0", + "@xterm/xterm": "^5.5.0", 
"chokidar": "^5.0.0", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "electron-log": "^5.4.3", "electron-updater": "^6.6.2", "i18next": "^25.7.3", - "lucide-react": "^0.562.0", + "lucide-react": "^0.560.0", "motion": "^12.23.26", "react": "^19.2.3", "react-dom": "^19.2.3", "react-i18next": "^16.5.0", "react-markdown": "^10.1.0", - "react-resizable-panels": "^4.2.0", + "react-resizable-panels": "^3.0.6", "remark-gfm": "^4.0.1", "semver": "^7.7.3", "tailwind-merge": "^3.4.0", "uuid": "^13.0.0", - "zod": "^4.2.1", "zustand": "^5.0.9" }, "devDependencies": { "@electron-toolkit/preload": "^3.0.2", "@electron-toolkit/utils": "^4.0.0", - "@electron/rebuild": "^4.0.2", + "@electron/rebuild": "^3.7.1", "@eslint/js": "^9.39.1", "@playwright/test": "^1.52.0", "@tailwindcss/postcss": "^4.1.17", + "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.1.0", "@types/node": "^25.0.0", "@types/react": "^19.2.7", @@ -72,33 +72,32 @@ "@types/uuid": "^10.0.0", "@vitejs/plugin-react": "^5.1.2", "autoprefixer": "^10.4.22", - "cross-env": "^10.1.0", "electron": "^39.2.7", "electron-builder": "^26.0.12", "electron-vite": "^5.0.0", "eslint": "^9.39.1", "eslint-plugin-react": "^7.37.5", "eslint-plugin-react-hooks": "^7.0.1", - "globals": "^17.0.0", + "globals": "^16.5.0", "husky": "^9.1.7", - "jsdom": "^27.3.0", + "jsdom": "^26.0.0", "lint-staged": "^16.2.7", "postcss": "^8.5.6", "tailwindcss": "^4.1.17", "typescript": "^5.9.3", - "typescript-eslint": "^8.50.1", + "typescript-eslint": "^8.49.0", "vite": "^7.2.7", - "vitest": "^4.0.16" + "vitest": "^4.0.15" }, "engines": { "node": ">=24.0.0", "npm": ">=10.0.0" } }, - "node_modules/@acemir/cssom": { - "version": "0.9.30", - "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.30.tgz", - "integrity": "sha512-9CnlMCI0LmCIq0olalQqdWrJHPzm0/tw3gzOA9zJSgvFX7Xau3D24mAGa4BtwxwY69nsuJW6kQqqCzf/mEcQgg==", + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", "dev": true, "license": "MIT" }, @@ -116,59 +115,25 @@ } }, "node_modules/@asamuzakjp/css-color": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.1.tgz", - "integrity": "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz", + "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==", "dev": true, "license": "MIT", "dependencies": { - "@csstools/css-calc": "^2.1.4", - "@csstools/css-color-parser": "^3.1.0", - "@csstools/css-parser-algorithms": "^3.0.5", - "@csstools/css-tokenizer": "^3.0.4", - "lru-cache": "^11.2.4" + "@csstools/css-calc": "^2.1.3", + "@csstools/css-color-parser": "^3.0.9", + "@csstools/css-parser-algorithms": "^3.0.4", + "@csstools/css-tokenizer": "^3.0.3", + "lru-cache": "^10.4.3" } }, "node_modules/@asamuzakjp/css-color/node_modules/lru-cache": { - "version": "11.2.4", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", - "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/@asamuzakjp/dom-selector": { - "version": 
"6.7.6", - "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.6.tgz", - "integrity": "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@asamuzakjp/nwsapi": "^2.3.9", - "bidi-js": "^1.0.3", - "css-tree": "^3.1.0", - "is-potential-custom-element-name": "^1.0.1", - "lru-cache": "^11.2.4" - } - }, - "node_modules/@asamuzakjp/dom-selector/node_modules/lru-cache": { - "version": "11.2.4", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", - "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/@asamuzakjp/nwsapi": { - "version": "2.3.9", - "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", - "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", "dev": true, - "license": "MIT" + "license": "ISC" }, "node_modules/@babel/code-frame": { "version": "7.27.1", @@ -592,26 +557,6 @@ "@csstools/css-tokenizer": "^3.0.4" } }, - "node_modules/@csstools/css-syntax-patches-for-csstree": { - "version": "1.0.22", - "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.22.tgz", - "integrity": "sha512-qBcx6zYlhleiFfdtzkRgwNC7VVoAwfK76Vmsw5t+PbvtdknO9StgRk7ROvq9so1iqbdW4uLIDAsXRsTfUrIoOw==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - } - }, "node_modules/@csstools/css-tokenizer": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", @@ -741,6 +686,28 @@ "node": ">=10.12.0" } }, + "node_modules/@electron/asar/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/@electron/asar/node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -785,29 +752,6 @@ "node": ">=10" } }, - "node_modules/@electron/fuses/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/fuses/node_modules/universalify": { - 
"version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@electron/get": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/@electron/get/-/get-2.0.3.tgz", @@ -830,6 +774,31 @@ "global-agent": "^3.0.0" } }, + "node_modules/@electron/get/node_modules/fs-extra": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", + "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + }, + "engines": { + "node": ">=6 <7 || >=8" + } + }, + "node_modules/@electron/get/node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "dev": true, + "license": "MIT", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, "node_modules/@electron/get/node_modules/semver": { "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", @@ -840,6 +809,16 @@ "semver": "bin/semver.js" } }, + "node_modules/@electron/get/node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/@electron/node-gyp": { "version": "10.2.0-electron.1", "resolved": "git+ssh://git@github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2", @@ -865,581 +844,99 @@ "node": ">=12.13.0" } }, - "node_modules/@electron/node-gyp/node_modules/@npmcli/fs": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz", - "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "@gar/promisify": "^1.1.3", - "semver": "^7.3.5" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true, - "license": "ISC" - }, - "node_modules/@electron/node-gyp/node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "node_modules/@electron/notarize": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz", + "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==", "dev": true, "license": "MIT", "dependencies": { - "debug": "4" + "debug": "^4.1.1", + "fs-extra": "^9.0.1", + "promise-retry": "^2.0.1" }, "engines": { - "node": ">= 6.0.0" + "node": ">= 10.0.0" } }, - 
"node_modules/@electron/node-gyp/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "node_modules/@electron/notarize/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", "dev": true, "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/cacache": { - "version": "16.1.3", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz", - "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "@npmcli/fs": "^2.1.0", - "@npmcli/move-file": "^2.0.0", - "chownr": "^2.0.0", - "fs-minipass": "^2.1.0", - "glob": "^8.0.1", - "infer-owner": "^1.0.4", - "lru-cache": "^7.7.1", - "minipass": "^3.1.6", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "mkdirp": "^1.0.4", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^9.0.0", - "tar": "^6.1.11", - "unique-filename": "^2.0.0" + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": ">=10" } }, - "node_modules/@electron/node-gyp/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "node_modules/@electron/osx-sign": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz", + "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==", "dev": true, - "license": "ISC", + "license": "BSD-2-Clause", "dependencies": { - "minipass": "^3.0.0" + "compare-version": "^0.1.2", + "debug": "^4.3.4", + "fs-extra": "^10.0.0", + "isbinaryfile": "^4.0.8", + "minimist": "^1.2.6", + "plist": "^3.0.5" + }, + "bin": { + "electron-osx-flat": "bin/electron-osx-flat.js", + "electron-osx-sign": "bin/electron-osx-sign.js" }, "engines": { - "node": ">= 8" + "node": ">=12.0.0" } }, - "node_modules/@electron/node-gyp/node_modules/glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "deprecated": "Glob versions prior to v9 are no longer supported", + "node_modules/@electron/osx-sign/node_modules/isbinaryfile": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", + "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - }, + "license": "MIT", "engines": { - "node": ">=12" + "node": ">= 8.0.0" }, "funding": { - "url": "https://github.com/sponsors/isaacs" + "url": 
"https://github.com/sponsors/gjtorikian/" } }, - "node_modules/@electron/node-gyp/node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "node_modules/@electron/rebuild": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-3.7.2.tgz", + "integrity": "sha512-19/KbIR/DAxbsCkiaGMXIdPnMCJLkcf8AvGnduJtWBs/CBwiAjY1apCqOLVxrXg+rtXFCngbXhBanWjxLUt1Mg==", "dev": true, "license": "MIT", "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" + "@electron/node-gyp": "git+https://github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2", + "@malept/cross-spawn-promise": "^2.0.0", + "chalk": "^4.0.0", + "debug": "^4.1.1", + "detect-libc": "^2.0.1", + "fs-extra": "^10.0.0", + "got": "^11.7.0", + "node-abi": "^3.45.0", + "node-api-version": "^0.2.0", + "ora": "^5.1.0", + "read-binary-file-arch": "^1.0.6", + "semver": "^7.3.5", + "tar": "^6.0.5", + "yargs": "^17.0.1" + }, + "bin": { + "electron-rebuild": "lib/cli.js" }, "engines": { - "node": ">= 6" - } - }, - "node_modules/@electron/node-gyp/node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/@electron/node-gyp/node_modules/lru-cache": { - "version": "7.18.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", - "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/@electron/node-gyp/node_modules/make-fetch-happen": { - "version": "10.2.1", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", - "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", - "dev": true, - "license": "ISC", - "dependencies": { - "agentkeepalive": "^4.2.1", - "cacache": "^16.1.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^7.7.1", - "minipass": "^3.1.6", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^2.0.3", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^7.0.0", - "ssri": "^9.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@electron/node-gyp/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@electron/node-gyp/node_modules/minipass-collect": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", - "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@electron/node-gyp/node_modules/minipass-fetch": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz", - "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", - "dev": true, - "license": "MIT", - "dependencies": { - "minipass": "^3.1.6", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - }, - "optionalDependencies": { - "encoding": "^0.1.13" - } - }, - "node_modules/@electron/node-gyp/node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "license": "MIT", - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@electron/node-gyp/node_modules/negotiator": { - "version": "0.6.4", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", - "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/@electron/node-gyp/node_modules/nopt": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz", - "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", - "dev": true, - "license": "ISC", - "dependencies": { - "abbrev": "^1.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@electron/node-gyp/node_modules/proc-log": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz", - "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==", - "dev": true, - "license": "ISC", - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions 
prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@electron/node-gyp/node_modules/socks-proxy-agent": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", - "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/@electron/node-gyp/node_modules/ssri": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", - "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.1.1" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/unique-filename": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz", - "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", - "dev": true, - "license": "ISC", - "dependencies": { - "unique-slug": "^3.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/unique-slug": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz", - "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", - "dev": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/@electron/node-gyp/node_modules/yallist": 
{ - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, - "node_modules/@electron/notarize": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz", - "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^4.1.1", - "fs-extra": "^9.0.1", - "promise-retry": "^2.0.1" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/notarize/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@electron/notarize/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/notarize/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/osx-sign": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz", - "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "compare-version": "^0.1.2", - "debug": "^4.3.4", - "fs-extra": "^10.0.0", - "isbinaryfile": "^4.0.8", - "minimist": "^1.2.6", - "plist": "^3.0.5" - }, - "bin": { - "electron-osx-flat": "bin/electron-osx-flat.js", - "electron-osx-sign": "bin/electron-osx-sign.js" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/@electron/osx-sign/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@electron/osx-sign/node_modules/isbinaryfile": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", - "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/gjtorikian/" - } - }, - "node_modules/@electron/osx-sign/node_modules/jsonfile": { - "version": 
"6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/osx-sign/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@electron/rebuild": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-4.0.2.tgz", - "integrity": "sha512-8iZWVPvOpCdIc5Pj5udQV3PeO7liJVC7BBUSizl1HCfP7ZxYc9Kqz0c3PDNj2HQ5cQfJ5JaBeJIYKPjAvLn2Rg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@malept/cross-spawn-promise": "^2.0.0", - "debug": "^4.1.1", - "detect-libc": "^2.0.1", - "got": "^11.7.0", - "graceful-fs": "^4.2.11", - "node-abi": "^4.2.0", - "node-api-version": "^0.2.1", - "node-gyp": "^11.2.0", - "ora": "^5.1.0", - "read-binary-file-arch": "^1.0.6", - "semver": "^7.3.5", - "tar": "^6.0.5", - "yargs": "^17.0.1" - }, - "bin": { - "electron-rebuild": "lib/cli.js" - }, - "engines": { - "node": ">=22.12.0" + "node": ">=12.13.0" } }, "node_modules/@electron/universal": { @@ -1472,9 +969,9 @@ } }, "node_modules/@electron/universal/node_modules/fs-extra": { - "version": "11.3.3", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz", - "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", + "version": "11.3.2", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", + "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", "dev": true, "license": "MIT", "dependencies": { @@ -1486,19 +983,6 @@ "node": ">=14.14" } }, - "node_modules/@electron/universal/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, "node_modules/@electron/universal/node_modules/minimatch": { "version": "9.0.5", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", @@ -1515,16 +999,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/@electron/universal/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@electron/windows-sign": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/@electron/windows-sign/-/windows-sign-1.2.2.tgz", @@ -1548,56 +1022,22 @@ } }, "node_modules/@electron/windows-sign/node_modules/fs-extra": { - "version": "11.3.3", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz", - "integrity": 
"sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=14.14" - } - }, - "node_modules/@electron/windows-sign/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@electron/windows-sign/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "version": "11.3.2", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", + "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", "dev": true, "license": "MIT", "optional": true, "peer": true, + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, "engines": { - "node": ">= 10.0.0" + "node": ">=14.14" } }, - "node_modules/@epic-web/invariant": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@epic-web/invariant/-/invariant-1.0.0.tgz", - "integrity": "sha512-lrTPqgvfFQtR/eY/qkIzp98OGdNJu0m5ji3q/nJI8v3SXkRKEnWiOxMmbvcSoAIzv/cGiuvRy57k4suKQSAdwA==", - "dev": true, - "license": "MIT" - }, "node_modules/@esbuild/aix-ppc64": { "version": "0.25.12", "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", @@ -2041,9 +1481,9 @@ } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", - "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", "dev": true, "license": "MIT", "dependencies": { @@ -2223,24 +1663,6 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@exodus/bytes": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.7.0.tgz", - "integrity": "sha512-5i+BtvujK/vM07YCGDyz4C4AyDzLmhxHMtM5HpUyPRtJPBdFPsj290ffXW+UXY21/G7GtXeHD2nRmq0T1ShyQQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" - }, - "peerDependencies": { - "@exodus/crypto": "^1.0.0-rc.4" - }, - "peerDependenciesMeta": { - "@exodus/crypto": { - "optional": true - } - } - }, "node_modules/@floating-ui/core": { "version": "1.7.3", "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", @@ -2379,6 +1801,19 @@ "node": ">=12" } }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", 
+ "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, "node_modules/@isaacs/cliui/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -2417,6 +1852,22 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { "version": "8.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", @@ -2435,19 +1886,6 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/@isaacs/fs-minipass": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", - "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^7.0.4" - }, - "engines": { - "node": ">=18.0.0" - } - }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.13", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", @@ -2645,64 +2083,18 @@ "node": ">=10" } }, - "node_modules/@malept/flatpak-bundler/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@malept/flatpak-bundler/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/@npmcli/agent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-3.0.0.tgz", - "integrity": "sha512-S79NdEgDQd/NGCay6TCoVzXSj74skRZIKJcpJjC5lOq34SZzyI6MqtiiWoiVWoVrTcGjNeC4ipbh1VIHlpfF5Q==", - "dev": true, - "license": "ISC", - "dependencies": { - "agent-base": "^7.1.0", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.1", - "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.3" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/@npmcli/agent/node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, "node_modules/@npmcli/fs": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-4.0.0.tgz", - "integrity": "sha512-/xGlezI6xfGO9NwuJlnwz/K14qD1kCSAGtacBHnGzeAIuJGazcp45KP5NuyARXoKb7cwulAGWVsbeSxdG/cb0Q==", + "version": 
"2.1.2", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz", + "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", "dev": true, "license": "ISC", "dependencies": { + "@gar/promisify": "^1.1.3", "semver": "^7.3.5" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/@npmcli/move-file": { @@ -2720,23 +2112,6 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/@npmcli/move-file/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -3995,9 +3370,9 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz", - "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.4.tgz", + "integrity": "sha512-PWU3Y92H4DD0bOqorEPp1Y0tbzwAurFmIYpjcObv5axGVOtcTlB0b2UKMd2echo08MgN7jO8WQZSSysvfisFSQ==", "cpu": [ "arm" ], @@ -4009,9 +3384,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz", - "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.4.tgz", + "integrity": "sha512-Gw0/DuVm3rGsqhMGYkSOXXIx20cC3kTlivZeuaGt4gEgILivykNyBWxeUV5Cf2tDA2nPLah26vq3emlRrWVbng==", "cpu": [ "arm64" ], @@ -4023,9 +3398,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz", - "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.4.tgz", + "integrity": "sha512-+w06QvXsgzKwdVg5qRLZpTHh1bigHZIqoIUPtiqh05ZiJVUQ6ymOxaPkXTvRPRLH88575ZCRSRM3PwIoNma01Q==", "cpu": [ "arm64" ], @@ -4037,9 +3412,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz", - "integrity": "sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.4.tgz", + "integrity": "sha512-EB4Na9G2GsrRNRNFPuxfwvDRDUwQEzJPpiK1vo2zMVhEeufZ1k7J1bKnT0JYDfnPC7RNZ2H5YNQhW6/p2QKATw==", "cpu": [ "x64" ], @@ -4051,9 +3426,9 @@ ] }, 
"node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz", - "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.4.tgz", + "integrity": "sha512-bldA8XEqPcs6OYdknoTMaGhjytnwQ0NClSPpWpmufOuGPN5dDmvIa32FygC2gneKK4A1oSx86V1l55hyUWUYFQ==", "cpu": [ "arm64" ], @@ -4065,9 +3440,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz", - "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.4.tgz", + "integrity": "sha512-3T8GPjH6mixCd0YPn0bXtcuSXi1Lj+15Ujw2CEb7dd24j9thcKscCf88IV7n76WaAdorOzAgSSbuVRg4C8V8Qw==", "cpu": [ "x64" ], @@ -4079,9 +3454,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz", - "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.4.tgz", + "integrity": "sha512-UPMMNeC4LXW7ZSHxeP3Edv09aLsFUMaD1TSVW6n1CWMECnUIJMFFB7+XC2lZTdPtvB36tYC0cJWc86mzSsaviw==", "cpu": [ "arm" ], @@ -4093,9 +3468,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz", - "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.4.tgz", + "integrity": "sha512-H8uwlV0otHs5Q7WAMSoyvjV9DJPiy5nJ/xnHolY0QptLPjaSsuX7tw+SPIfiYH6cnVx3fe4EWFafo6gH6ekZKA==", "cpu": [ "arm" ], @@ -4107,9 +3482,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz", - "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.4.tgz", + "integrity": "sha512-BLRwSRwICXz0TXkbIbqJ1ibK+/dSBpTJqDClF61GWIrxTXZWQE78ROeIhgl5MjVs4B4gSLPCFeD4xML9vbzvCQ==", "cpu": [ "arm64" ], @@ -4121,9 +3496,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz", - "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.4.tgz", + "integrity": "sha512-6bySEjOTbmVcPJAywjpGLckK793A0TJWSbIa0sVwtVGfe/Nz6gOWHOwkshUIAp9j7wg2WKcA4Snu7Y1nUZyQew==", "cpu": [ "arm64" ], @@ -4135,9 
+3510,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz", - "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.4.tgz", + "integrity": "sha512-U0ow3bXYJZ5MIbchVusxEycBw7bO6C2u5UvD31i5IMTrnt2p4Fh4ZbHSdc/31TScIJQYHwxbj05BpevB3201ug==", "cpu": [ "loong64" ], @@ -4149,9 +3524,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz", - "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.4.tgz", + "integrity": "sha512-iujDk07ZNwGLVn0YIWM80SFN039bHZHCdCCuX9nyx3Jsa2d9V/0Y32F+YadzwbvDxhSeVo9zefkoPnXEImnM5w==", "cpu": [ "ppc64" ], @@ -4163,9 +3538,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz", - "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.4.tgz", + "integrity": "sha512-MUtAktiOUSu+AXBpx1fkuG/Bi5rhlorGs3lw5QeJ2X3ziEGAq7vFNdWVde6XGaVqi0LGSvugwjoxSNJfHFTC0g==", "cpu": [ "riscv64" ], @@ -4177,9 +3552,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz", - "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.4.tgz", + "integrity": "sha512-btm35eAbDfPtcFEgaXCI5l3c2WXyzwiE8pArhd66SDtoLWmgK5/M7CUxmUglkwtniPzwvWioBKKl6IXLbPf2sQ==", "cpu": [ "riscv64" ], @@ -4191,9 +3566,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz", - "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.4.tgz", + "integrity": "sha512-uJlhKE9ccUTCUlK+HUz/80cVtx2RayadC5ldDrrDUFaJK0SNb8/cCmC9RhBhIWuZ71Nqj4Uoa9+xljKWRogdhA==", "cpu": [ "s390x" ], @@ -4205,9 +3580,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz", - "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.4.tgz", + "integrity": "sha512-jjEMkzvASQBbzzlzf4os7nzSBd/cvPrpqXCUOqoeCh1dQ4BP3RZCJk8XBeik4MUln3m+8LeTJcY54C/u8wb3DQ==", 
"cpu": [ "x64" ], @@ -4219,9 +3594,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz", - "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.4.tgz", + "integrity": "sha512-lu90KG06NNH19shC5rBPkrh6mrTpq5kviFylPBXQVpdEu0yzb0mDgyxLr6XdcGdBIQTH/UAhDJnL+APZTBu1aQ==", "cpu": [ "x64" ], @@ -4233,9 +3608,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz", - "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.4.tgz", + "integrity": "sha512-dFDcmLwsUzhAm/dn0+dMOQZoONVYBtgik0VuY/d5IJUUb787L3Ko/ibvTvddqhb3RaB7vFEozYevHN4ox22R/w==", "cpu": [ "arm64" ], @@ -4247,9 +3622,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz", - "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.4.tgz", + "integrity": "sha512-WvUpUAWmUxZKtRnQWpRKnLW2DEO8HB/l8z6oFFMNuHndMzFTJEXzaYJ5ZAmzNw0L21QQJZsUQFt2oPf3ykAD/w==", "cpu": [ "arm64" ], @@ -4261,9 +3636,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz", - "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.4.tgz", + "integrity": "sha512-JGbeF2/FDU0x2OLySw/jgvkwWUo05BSiJK0dtuI4LyuXbz3wKiC1xHhLB1Tqm5VU6ZZDmAorj45r/IgWNWku5g==", "cpu": [ "ia32" ], @@ -4275,9 +3650,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz", - "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.4.tgz", + "integrity": "sha512-zuuC7AyxLWLubP+mlUwEyR8M1ixW1ERNPHJfXm8x7eQNP4Pzkd7hS3qBuKBR70VRiQ04Kw8FNfRMF5TNxuZq2g==", "cpu": [ "x64" ], @@ -4289,9 +3664,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz", - "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.4.tgz", + "integrity": "sha512-Sbx45u/Lbb5RyptSbX7/3deP+/lzEmZ0BTSHxwxN/IMOZDZf8S0AGo0hJD5n/LQssxb5Z3B4og4P2X6Dd8acCA==", "cpu": [ "x64" 
], @@ -4316,9 +3691,9 @@ } }, "node_modules/@standard-schema/spec": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", - "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", + "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==", "dev": true, "license": "MIT" }, @@ -4558,66 +3933,6 @@ "node": ">=14.0.0" } }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": { - "version": "1.7.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/wasi-threads": "1.1.0", - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": { - "version": "1.7.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": { - "version": "1.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": { - "version": "1.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/core": "^1.7.1", - "@emnapi/runtime": "^1.7.1", - "@tybys/wasm-util": "^0.10.1" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": { - "version": "0.10.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, - "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": { - "version": "2.8.1", - "dev": true, - "inBundle": true, - "license": "0BSD", - "optional": true - }, "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { "version": "4.1.18", "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", @@ -4679,12 +3994,12 @@ } }, "node_modules/@tanstack/react-virtual": { - "version": "3.13.14", - "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.14.tgz", - "integrity": "sha512-WG0d7mBD54eA7dgA3+sO5csS0B49QKqM6Gy5Rf31+Oq/LTKROQSao9m2N/vz1IqVragOKU5t5k1LAcqh/DfTxw==", + "version": "3.13.13", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.13.tgz", + "integrity": "sha512-4o6oPMDvQv+9gMi8rE6gWmsOjtUZUYIJHv7EB+GblyYdi8U6OqLl8rhHWIUZSL1dUU2dPwTdTgybCKf9EjIrQg==", "license": "MIT", "dependencies": { - "@tanstack/virtual-core": "3.13.14" + "@tanstack/virtual-core": "3.13.13" }, "funding": { "type": "github", @@ -4696,9 +4011,9 @@ } }, "node_modules/@tanstack/virtual-core": { - "version": "3.13.14", - "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.14.tgz", - "integrity": "sha512-b5Uvd8J2dc7ICeX9SRb/wkCxWk7pUwN214eEPAQsqrsktSKTCmyLxOQWSMgogBByXclZeAdgZ3k4o0fIYUIBqQ==", + "version": "3.13.13", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.13.tgz", + "integrity": "sha512-uQFoSdKKf5S8k51W5t7b2qpfkyIbdHMzAn+AMQvHPxKUPeo1SsGaA4JRISQT87jm28b7z8OEqPcg1IOZagQHcA==", "license": "MIT", "funding": { "type": "github", @@ -4726,6 +4041,33 @@ "node": ">=18" } }, + "node_modules/@testing-library/jest-dom": { + 
"version": "6.9.1", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz", + "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, "node_modules/@testing-library/react": { "version": "16.3.1", "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.1.tgz", @@ -4931,9 +4273,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "25.0.3", - "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.3.tgz", - "integrity": "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==", + "version": "25.0.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.2.tgz", + "integrity": "sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA==", "dev": true, "license": "MIT", "dependencies": { @@ -5021,20 +4363,20 @@ } }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.51.0.tgz", - "integrity": "sha512-XtssGWJvypyM2ytBnSnKtHYOGT+4ZwTnBVl36TA4nRO2f4PRNGz5/1OszHzcZCvcBMh+qb7I06uoCmLTRdR9og==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.49.0.tgz", + "integrity": "sha512-JXij0vzIaTtCwu6SxTh8qBc66kmf1xs7pI4UOiMDFVct6q86G0Zs7KRcEoJgY3Cav3x5Tq0MF5jwgpgLqgKG3A==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.51.0", - "@typescript-eslint/type-utils": "8.51.0", - "@typescript-eslint/utils": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0", + "@typescript-eslint/scope-manager": "8.49.0", + "@typescript-eslint/type-utils": "8.49.0", + "@typescript-eslint/utils": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0", "ignore": "^7.0.0", "natural-compare": "^1.4.0", - "ts-api-utils": "^2.2.0" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5044,7 +4386,7 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^8.51.0", + "@typescript-eslint/parser": "^8.49.0", "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } @@ -5060,16 +4402,16 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.51.0.tgz", - "integrity": "sha512-3xP4XzzDNQOIqBMWogftkwxhg5oMKApqY0BAflmLZiFYHqyhSOxv/cd/zPQLTcCXr4AkaKb25joocY0BD1WC6A==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.49.0.tgz", + "integrity": "sha512-N9lBGA9o9aqb1hVMc9hzySbhKibHmB+N3IpoShyV6HyQYRGIhlrO5rQgttypi+yEeKsKI4idxC8Jw6gXKD4THA==", "dev": true, "license": "MIT", 
"dependencies": { - "@typescript-eslint/scope-manager": "8.51.0", - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0", + "@typescript-eslint/scope-manager": "8.49.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0", "debug": "^4.3.4" }, "engines": { @@ -5085,14 +4427,14 @@ } }, "node_modules/@typescript-eslint/project-service": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.51.0.tgz", - "integrity": "sha512-Luv/GafO07Z7HpiI7qeEW5NW8HUtZI/fo/kE0YbtQEFpJRUuR0ajcWfCE5bnMvL7QQFrmT/odMe8QZww8X2nfQ==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.49.0.tgz", + "integrity": "sha512-/wJN0/DKkmRUMXjZUXYZpD1NEQzQAAn9QWfGwo+Ai8gnzqH7tvqS7oNVdTjKqOcPyVIdZdyCMoqN66Ia789e7g==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/tsconfig-utils": "^8.51.0", - "@typescript-eslint/types": "^8.51.0", + "@typescript-eslint/tsconfig-utils": "^8.49.0", + "@typescript-eslint/types": "^8.49.0", "debug": "^4.3.4" }, "engines": { @@ -5107,14 +4449,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.51.0.tgz", - "integrity": "sha512-JhhJDVwsSx4hiOEQPeajGhCWgBMBwVkxC/Pet53EpBVs7zHHtayKefw1jtPaNRXpI9RA2uocdmpdfE7T+NrizA==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.49.0.tgz", + "integrity": "sha512-npgS3zi+/30KSOkXNs0LQXtsg9ekZ8OISAOLGWA/ZOEn0ZH74Ginfl7foziV8DT+D98WfQ5Kopwqb/PZOaIJGg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0" + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5125,9 +4467,9 @@ } }, "node_modules/@typescript-eslint/tsconfig-utils": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.51.0.tgz", - "integrity": "sha512-Qi5bSy/vuHeWyir2C8u/uqGMIlIDu8fuiYWv48ZGlZ/k+PRPHtaAu7erpc7p5bzw2WNNSniuxoMSO4Ar6V9OXw==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.49.0.tgz", + "integrity": "sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA==", "dev": true, "license": "MIT", "engines": { @@ -5142,17 +4484,17 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.51.0.tgz", - "integrity": "sha512-0XVtYzxnobc9K0VU7wRWg1yiUrw4oQzexCG2V2IDxxCxhqBMSMbjB+6o91A+Uc0GWtgjCa3Y8bi7hwI0Tu4n5Q==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.49.0.tgz", + "integrity": "sha512-KTExJfQ+svY8I10P4HdxKzWsvtVnsuCifU5MvXrRwoP2KOlNZ9ADNEWWsQTJgMxLzS5VLQKDjkCT/YzgsnqmZg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0", - "@typescript-eslint/utils": "8.51.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0", + 
"@typescript-eslint/utils": "8.49.0", "debug": "^4.3.4", - "ts-api-utils": "^2.2.0" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5167,9 +4509,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.51.0.tgz", - "integrity": "sha512-TizAvWYFM6sSscmEakjY3sPqGwxZRSywSsPEiuZF6d5GmGD9Gvlsv0f6N8FvAAA0CD06l3rIcWNbsN1e5F/9Ag==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.49.0.tgz", + "integrity": "sha512-e9k/fneezorUo6WShlQpMxXh8/8wfyc+biu6tnAqA81oWrEic0k21RHzP9uqqpyBBeBKu4T+Bsjy9/b8u7obXQ==", "dev": true, "license": "MIT", "engines": { @@ -5181,21 +4523,21 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.51.0.tgz", - "integrity": "sha512-1qNjGqFRmlq0VW5iVlcyHBbCjPB7y6SxpBkrbhNWMy/65ZoncXCEPJxkRZL8McrseNH6lFhaxCIaX+vBuFnRng==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.49.0.tgz", + "integrity": "sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/project-service": "8.51.0", - "@typescript-eslint/tsconfig-utils": "8.51.0", - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/visitor-keys": "8.51.0", + "@typescript-eslint/project-service": "8.49.0", + "@typescript-eslint/tsconfig-utils": "8.49.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/visitor-keys": "8.49.0", "debug": "^4.3.4", "minimatch": "^9.0.4", "semver": "^7.6.0", "tinyglobby": "^0.2.15", - "ts-api-utils": "^2.2.0" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5235,16 +4577,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.51.0.tgz", - "integrity": "sha512-11rZYxSe0zabiKaCP2QAwRf/dnmgFgvTmeDTtZvUvXG3UuAdg/GU02NExmmIXzz3vLGgMdtrIosI84jITQOxUA==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.49.0.tgz", + "integrity": "sha512-N3W7rJw7Rw+z1tRsHZbK395TWSYvufBXumYtEGzypgMUthlg0/hmCImeA8hgO2d2G4pd7ftpxxul2J8OdtdaFA==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", - "@typescript-eslint/scope-manager": "8.51.0", - "@typescript-eslint/types": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0" + "@typescript-eslint/scope-manager": "8.49.0", + "@typescript-eslint/types": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5259,13 +4601,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.51.0.tgz", - "integrity": "sha512-mM/JRQOzhVN1ykejrvwnBRV3+7yTKK8tVANVN3o1O0t0v7o+jqdVu9crPy5Y9dov15TJk/FTIgoUGHrTOVL3Zg==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.49.0.tgz", + "integrity": "sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.51.0", + 
"@typescript-eslint/types": "8.49.0", "eslint-visitor-keys": "^4.2.1" }, "engines": { @@ -5304,16 +4646,16 @@ } }, "node_modules/@vitest/expect": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz", - "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.15.tgz", + "integrity": "sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==", "dev": true, "license": "MIT", "dependencies": { "@standard-schema/spec": "^1.0.0", "@types/chai": "^5.2.2", - "@vitest/spy": "4.0.16", - "@vitest/utils": "4.0.16", + "@vitest/spy": "4.0.15", + "@vitest/utils": "4.0.15", "chai": "^6.2.1", "tinyrainbow": "^3.0.3" }, @@ -5322,13 +4664,13 @@ } }, "node_modules/@vitest/mocker": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz", - "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.15.tgz", + "integrity": "sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/spy": "4.0.16", + "@vitest/spy": "4.0.15", "estree-walker": "^3.0.3", "magic-string": "^0.30.21" }, @@ -5349,9 +4691,9 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz", - "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.15.tgz", + "integrity": "sha512-SWdqR8vEv83WtZcrfLNqlqeQXlQLh2iilO1Wk1gv4eiHKjEzvgHb2OVc3mIPyhZE6F+CtfYjNlDJwP5MN6Km7A==", "dev": true, "license": "MIT", "dependencies": { @@ -5362,13 +4704,13 @@ } }, "node_modules/@vitest/runner": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz", - "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.15.tgz", + "integrity": "sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/utils": "4.0.16", + "@vitest/utils": "4.0.15", "pathe": "^2.0.3" }, "funding": { @@ -5376,13 +4718,13 @@ } }, "node_modules/@vitest/snapshot": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz", - "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.15.tgz", + "integrity": "sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.0.16", + "@vitest/pretty-format": "4.0.15", "magic-string": "^0.30.21", "pathe": "^2.0.3" }, @@ -5391,9 +4733,9 @@ } }, "node_modules/@vitest/spy": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz", - "integrity": 
"sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.15.tgz", + "integrity": "sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==", "dev": true, "license": "MIT", "funding": { @@ -5401,13 +4743,13 @@ } }, "node_modules/@vitest/utils": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz", - "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.15.tgz", + "integrity": "sha512-HXjPW2w5dxhTD0dLwtYHDnelK3j8sR8cWIaLxr22evTyY6q8pRCjZSmhRWVjBaOVXChQd6AwMzi9pucorXCPZA==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.0.16", + "@vitest/pretty-format": "4.0.15", "tinyrainbow": "^3.0.3" }, "funding": { @@ -5425,37 +4767,47 @@ } }, "node_modules/@xterm/addon-fit": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.11.0.tgz", - "integrity": "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==", - "license": "MIT" + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.10.0.tgz", + "integrity": "sha512-UFYkDm4HUahf2lnEyHvio51TNGiLK66mqP2JoATy7hRZeXaGMRDr00JiSF7m63vR5WKATF605yEggJKsw0JpMQ==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/addon-serialize": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.14.0.tgz", - "integrity": "sha512-uteyTU1EkrQa2Ux6P/uFl2fzmXI46jy5uoQMKEOM0fKTyiW7cSn0WrFenHm5vO5uEXX/GpwW/FgILvv3r0WbkA==", - "license": "MIT" + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.13.0.tgz", + "integrity": "sha512-kGs8o6LWAmN1l2NpMp01/YkpxbmO4UrfWybeGu79Khw5K9+Krp7XhXbBTOTc3GJRRhd6EmILjpR8k5+odY39YQ==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/addon-web-links": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.12.0.tgz", - "integrity": "sha512-4Smom3RPyVp7ZMYOYDoC/9eGJJJqYhnPLGGqJ6wOBfB8VxPViJNSKdgRYb8NpaM6YSelEKbA2SStD7lGyqaobw==", - "license": "MIT" + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.11.0.tgz", + "integrity": "sha512-nIHQ38pQI+a5kXnRaTgwqSHnX7KE6+4SVoceompgHL26unAxdfP6IPqUTSYPQgSwM56hsElfoNrrW5V7BUED/Q==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/addon-webgl": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.19.0.tgz", - "integrity": "sha512-b3fMOsyLVuCeNJWxolACEUED0vm7qC0cy4wRvf3oURSzDTYVQiGPhTnhWZwIHdvC48Y+oLhvYXnY4XDXPoJo6A==", - "license": "MIT" + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.18.0.tgz", + "integrity": "sha512-xCnfMBTI+/HKPdRnSOHaJDRqEpq2Ugy8LEj9GiY4J3zJObo3joylIFaMvzBwbYRg8zLtkO0KQaStCeSfoaI2/w==", + "license": "MIT", + "peerDependencies": { + "@xterm/xterm": "^5.0.0" + } }, "node_modules/@xterm/xterm": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz", - "integrity": 
"sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==", + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-5.5.0.tgz", + "integrity": "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A==", "license": "MIT", - "workspaces": [ - "addons/*" - ] + "peer": true }, "node_modules/7zip-bin": { "version": "5.2.0", @@ -5465,14 +4817,11 @@ "license": "MIT" }, "node_modules/abbrev": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz", - "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", "dev": true, - "license": "ISC", - "engines": { - "node": "^18.17.0 || >=20.5.0" - } + "license": "ISC" }, "node_modules/acorn": { "version": "8.15.0", @@ -5680,63 +5029,12 @@ "semver": "^7.3.5", "tar": "^6.0.5", "yargs": "^17.0.1" - }, - "bin": { - "electron-rebuild": "lib/cli.js" - }, - "engines": { - "node": ">=12.13.0" - } - }, - "node_modules/app-builder-lib/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/app-builder-lib/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/app-builder-lib/node_modules/node-abi": { - "version": "3.85.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz", - "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/app-builder-lib/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", + }, + "bin": { + "electron-rebuild": "lib/cli.js" + }, "engines": { - "node": ">= 10.0.0" + "node": ">=12.13.0" } }, "node_modules/argparse": { @@ -6074,25 +5372,15 @@ "license": "MIT" }, "node_modules/baseline-browser-mapping": { - "version": "2.9.11", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", - "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", + "version": "2.9.7", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.7.tgz", + "integrity": "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg==", "dev": true, 
"license": "Apache-2.0", "bin": { "baseline-browser-mapping": "dist/cli.js" } }, - "node_modules/bidi-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", - "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", - "dev": true, - "license": "MIT", - "dependencies": { - "require-from-string": "^2.0.2" - } - }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", @@ -6253,44 +5541,6 @@ "node": ">=12.0.0" } }, - "node_modules/builder-util/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/builder-util/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/builder-util/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/cac": { "version": "6.7.14", "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", @@ -6302,118 +5552,43 @@ } }, "node_modules/cacache": { - "version": "19.0.1", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-19.0.1.tgz", - "integrity": "sha512-hdsUxulXCi5STId78vRVYEtDAjq99ICAUktLTeTYsLoTE6Z8dS0c8pWNCxwdrk9YfJeobDZc2Y186hD/5ZQgFQ==", + "version": "16.1.3", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz", + "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", "dev": true, "license": "ISC", "dependencies": { - "@npmcli/fs": "^4.0.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^10.0.1", - "minipass": "^7.0.3", - "minipass-collect": "^2.0.1", + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "p-map": "^7.0.2", - "ssri": "^12.0.0", - "tar": "^7.4.3", - "unique-filename": "^4.0.0" + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^2.0.0" }, "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/cacache/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, 
- "node_modules/cacache/node_modules/chownr": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", - "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/cacache/node_modules/glob": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", - "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/cacache/node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/cacache/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", "dev": true, "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/cacache/node_modules/tar": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", - "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.1.0", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/cacache/node_modules/yallist": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", - "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", - "dev": true, - "license": "BlueOak-1.0.0", "engines": { - "node": ">=18" + "node": ">=12" } }, "node_modules/cacheable-lookup": { @@ -6506,9 +5681,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001762", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001762.tgz", - "integrity": "sha512-PxZwGNvH7Ak8WX5iXzoK1KPZttBXNPuaOvI2ZYU7NrlM+d9Ov+TUvlLOBNGzVXAntMSMMlJPd+jY6ovrVjSmUw==", + "version": "1.0.30001760", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001760.tgz", + "integrity": "sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw==", "dev": true, "funding": [ { @@ -6537,9 +5712,9 @@ } }, "node_modules/chai": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", - "integrity": 
"sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.1.tgz", + "integrity": "sha512-p4Z49OGG5W/WBCPSS/dH3jQ73kD6tiMmUM+bckNK6Jr5JHMG3k9bg/BvKR8lKmtVBKmOiuVaV2ws8s9oSbwysg==", "dev": true, "license": "MIT", "engines": { @@ -6674,19 +5849,16 @@ } }, "node_modules/cli-cursor": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", - "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", "dev": true, "license": "MIT", "dependencies": { - "restore-cursor": "^5.0.0" + "restore-cursor": "^3.1.0" }, "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/cli-spinners": { @@ -6735,37 +5907,6 @@ "node": ">=12" } }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, "node_modules/clone": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", @@ -6933,6 +6074,16 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/config-file-ts/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/convert-source-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", @@ -6968,24 +6119,6 @@ "optional": true, "peer": true }, - "node_modules/cross-env": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-10.1.0.tgz", - "integrity": "sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@epic-web/invariant": "^1.0.0", - "cross-spawn": "^7.0.6" - }, - "bin": { - "cross-env": "dist/bin/cross-env.js", - "cross-env-shell": "dist/bin/cross-env-shell.js" - }, - "engines": { - "node": ">=20" - } - }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -7001,19 +6134,12 @@ "node": ">= 8" } }, - "node_modules/css-tree": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", - "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", "dev": true, - "license": "MIT", - "dependencies": { - "mdn-data": "2.12.2", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" - } + "license": "MIT" }, "node_modules/cssesc": { "version": "3.0.0", @@ -7028,29 +6154,17 @@ } }, "node_modules/cssstyle": { - "version": "5.3.6", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.6.tgz", - "integrity": "sha512-legscpSpgSAeGEe0TNcai97DKt9Vd9AsAdOL7Uoetb52Ar/8eJm3LIa39qpv8wWzLFlNG4vVvppQM+teaMPj3A==", + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz", + "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==", "dev": true, "license": "MIT", "dependencies": { - "@asamuzakjp/css-color": "^4.1.1", - "@csstools/css-syntax-patches-for-csstree": "^1.0.21", - "css-tree": "^3.1.0", - "lru-cache": "^11.2.4" + "@asamuzakjp/css-color": "^3.2.0", + "rrweb-cssom": "^0.8.0" }, "engines": { - "node": ">=20" - } - }, - "node_modules/cssstyle/node_modules/lru-cache": { - "version": "11.2.4", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", - "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": "20 || >=22" + "node": ">=18" } }, "node_modules/csstype": { @@ -7060,17 +6174,17 @@ "license": "MIT" }, "node_modules/data-urls": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz", - "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", + "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==", "dev": true, "license": "MIT", "dependencies": { "whatwg-mimetype": "^4.0.0", - "whatwg-url": "^15.0.0" + "whatwg-url": "^14.0.0" }, "engines": { - "node": ">=20" + "node": ">=18" } }, "node_modules/data-view-buffer": { @@ -7336,63 +6450,25 @@ "brace-expansion": "^1.1.7" }, "engines": { - "node": "*" - } - }, - "node_modules/dmg-builder": { - "version": "26.0.12", - "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz", - "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==", - "dev": true, - "license": "MIT", - "dependencies": { - "app-builder-lib": "26.0.12", - "builder-util": "26.0.11", - "builder-util-runtime": "9.3.1", - "fs-extra": "^10.1.0", - "iconv-lite": "^0.6.2", - "js-yaml": "^4.1.0" - }, - "optionalDependencies": { - "dmg-license": "^1.0.11" - } - }, - "node_modules/dmg-builder/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": 
"^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" + "node": "*" } }, - "node_modules/dmg-builder/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "node_modules/dmg-builder": { + "version": "26.0.12", + "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz", + "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==", "dev": true, "license": "MIT", "dependencies": { - "universalify": "^2.0.0" + "app-builder-lib": "26.0.12", + "builder-util": "26.0.11", + "builder-util-runtime": "9.3.1", + "fs-extra": "^10.1.0", + "iconv-lite": "^0.6.2", + "js-yaml": "^4.1.0" }, "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/dmg-builder/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" + "dmg-license": "^1.0.11" } }, "node_modules/dmg-license": { @@ -7568,44 +6644,6 @@ "electron-winstaller": "5.4.0" } }, - "node_modules/electron-builder/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-builder/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-builder/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/electron-log": { "version": "5.4.3", "resolved": "https://registry.npmjs.org/electron-log/-/electron-log-5.4.3.tgz", @@ -7632,44 +6670,6 @@ "mime": "^2.5.2" } }, - "node_modules/electron-publish/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-publish/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - 
"dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-publish/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/electron-to-chromium": { "version": "1.5.267", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", @@ -7693,41 +6693,6 @@ "tiny-typed-emitter": "^2.1.0" } }, - "node_modules/electron-updater/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/electron-updater/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/electron-updater/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/electron-vite": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/electron-vite/-/electron-vite-5.0.0.tgz", @@ -7796,6 +6761,28 @@ "node": ">=6 <7 || >=8" } }, + "node_modules/electron-winstaller/node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "dev": true, + "license": "MIT", + "peer": true, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/electron-winstaller/node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/electron/node_modules/@types/node": { "version": "22.19.3", "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", @@ -8353,9 +7340,9 @@ } }, "node_modules/esquery": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", - "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", "dev": true, "license": "BSD-3-Clause", "dependencies": { @@ -8511,6 +7498,24 @@ "pend": "~1.2.0" } }, + 
"node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, "node_modules/file-entry-cache": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", @@ -8641,6 +7646,19 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/form-data": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", @@ -8700,31 +7718,30 @@ } }, "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dev": true, + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", "license": "MIT", "dependencies": { "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": ">=6 <7 || >=8" + "node": ">=12" } }, "node_modules/fs-minipass": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", - "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", "dev": true, "license": "ISC", "dependencies": { - "minipass": "^7.0.3" + "minipass": "^3.0.0" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">= 8" } }, "node_modules/fs.realpath": { @@ -8916,9 +7933,9 @@ } }, "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "license": "ISC", @@ -8926,12 +7943,11 @@ "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "minimatch": "^5.0.1", + "once": "^1.3.0" }, "engines": { - "node": "*" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -8950,17 +7966,27 @@ "node": ">=10.13.0" } }, + "node_modules/glob/node_modules/brace-expansion": { + 
"version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, "node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", "dev": true, "license": "ISC", "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=10" } }, "node_modules/global-agent": { @@ -8983,9 +8009,9 @@ } }, "node_modules/globals": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-17.0.0.tgz", - "integrity": "sha512-gv5BeD2EssA793rlFWVPMMCqefTlpusw6/2TbAVMy0FzcG8wKJn4O+NqJ4+XWmmwrayJgw5TzrmWjFgmz1XPqw==", + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz", + "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", "dev": true, "license": "MIT", "engines": { @@ -9242,16 +8268,16 @@ "license": "ISC" }, "node_modules/html-encoding-sniffer": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz", - "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", "dev": true, "license": "MIT", "dependencies": { - "@exodus/bytes": "^1.6.0" + "whatwg-encoding": "^3.1.1" }, "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + "node": ">=18" } }, "node_modules/html-parse-stringify": { @@ -10152,35 +9178,35 @@ } }, "node_modules/jsdom": { - "version": "27.4.0", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.4.0.tgz", - "integrity": "sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ==", + "version": "26.1.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-26.1.0.tgz", + "integrity": "sha512-Cvc9WUhxSMEo4McES3P7oK3QaXldCfNWp7pl2NNeiIFlCoLr3kfq9kb1fxftiwk1FLV7CvpvDfonxtzUDeSOPg==", "dev": true, "license": "MIT", "dependencies": { - "@acemir/cssom": "^0.9.28", - "@asamuzakjp/dom-selector": "^6.7.6", - "@exodus/bytes": "^1.6.0", - "cssstyle": "^5.3.4", - "data-urls": "^6.0.0", - "decimal.js": "^10.6.0", - "html-encoding-sniffer": "^6.0.0", + "cssstyle": "^4.2.1", + "data-urls": "^5.0.0", + "decimal.js": "^10.5.0", + "html-encoding-sniffer": "^4.0.0", "http-proxy-agent": "^7.0.2", "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", - "parse5": "^8.0.0", + "nwsapi": "^2.2.16", + "parse5": "^7.2.1", + "rrweb-cssom": "^0.8.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", - "tough-cookie": "^6.0.0", + "tough-cookie": "^5.1.1", "w3c-xmlserializer": "^5.0.0", - "webidl-conversions": "^8.0.0", + "webidl-conversions": "^7.0.0", + "whatwg-encoding": "^3.1.1", 
"whatwg-mimetype": "^4.0.0", - "whatwg-url": "^15.1.0", - "ws": "^8.18.3", + "whatwg-url": "^14.1.1", + "ws": "^8.18.0", "xml-name-validator": "^5.0.0" }, "engines": { - "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + "node": ">=18" }, "peerDependencies": { "canvas": "^3.0.0" @@ -10247,11 +9273,13 @@ } }, "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", - "dev": true, + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, "optionalDependencies": { "graceful-fs": "^4.1.6" } @@ -10616,6 +9644,19 @@ "node": ">=20.0.0" } }, + "node_modules/listr2/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, "node_modules/listr2/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -10646,6 +9687,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/listr2/node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "dev": true, + "license": "MIT" + }, "node_modules/listr2/node_modules/is-fullwidth-code-point": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", @@ -10696,6 +9744,58 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/listr2/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/listr2/node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/listr2/node_modules/wrap-ansi/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + 
"strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -10776,6 +9876,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/log-update/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, "node_modules/log-update/node_modules/ansi-styles": { "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", @@ -10789,6 +9902,29 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/log-update/node_modules/cli-cursor": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", + "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "dev": true, + "license": "MIT" + }, "node_modules/log-update/node_modules/is-fullwidth-code-point": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", @@ -10805,6 +9941,52 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/log-update/node_modules/onetime": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", + "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-function": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/restore-cursor": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", + "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^7.0.0", + "signal-exit": "^4.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/log-update/node_modules/slice-ansi": { "version": "7.1.2", "resolved": 
"https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz", @@ -10822,6 +10004,58 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, + "node_modules/log-update/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/log-update/node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/longest-streak": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", @@ -10866,9 +10100,9 @@ } }, "node_modules/lucide-react": { - "version": "0.562.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz", - "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==", + "version": "0.560.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.560.0.tgz", + "integrity": "sha512-NwKoUA/aBShsdL8WE5lukV2F/tjHzQRlonQs7fkNGI1sCT0Ay4a9Ap3ST2clUUkcY+9eQ0pBe2hybTQd2fmyDA==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" @@ -10896,26 +10130,83 @@ } }, "node_modules/make-fetch-happen": { - "version": "14.0.3", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz", - "integrity": "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==", + "version": "10.2.1", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", + "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", "dev": true, "license": "ISC", "dependencies": { - "@npmcli/agent": "^3.0.0", - "cacache": "^19.0.1", - "http-cache-semantics": "^4.1.1", - "minipass": "^7.0.2", - "minipass-fetch": "^4.0.0", + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "negotiator": "^1.0.0", - "proc-log": "^5.0.0", + "negotiator": 
"^0.6.3", "promise-retry": "^2.0.1", - "ssri": "^12.0.0" + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "6", + "debug": "4" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" } }, "node_modules/markdown-table": { @@ -11234,13 +10525,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdn-data": { - "version": "2.12.2", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", - "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", - "dev": true, - "license": "CC0-1.0" - }, "node_modules/micromark": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", @@ -11818,6 +11102,19 @@ "node": ">=8.6" } }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/mime": { "version": "2.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", @@ -11887,6 +11184,16 @@ "node": ">=4" } }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/minimatch": { "version": "10.1.1", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz", @@ -11914,41 +11221,44 @@ } }, "node_modules/minipass": { - "version": "7.1.2", - "resolved": 
"https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", "dev": true, "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">=8" } }, "node_modules/minipass-collect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz", - "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", + "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", "dev": true, "license": "ISC", "dependencies": { - "minipass": "^7.0.3" + "minipass": "^3.0.0" }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">= 8" } }, "node_modules/minipass-fetch": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-4.0.1.tgz", - "integrity": "sha512-j7U11C5HXigVuutxebFadoYBbd7VSdZWggSe64NVdvWNBqGAiXPL2QVCehjmw7lY1oF9gOllYbORh+hiNgfPgQ==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz", + "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", "dev": true, "license": "MIT", "dependencies": { - "minipass": "^7.0.3", + "minipass": "^3.1.6", "minipass-sized": "^1.0.3", - "minizlib": "^3.0.1" + "minizlib": "^2.1.2" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" }, "optionalDependencies": { "encoding": "^0.1.13" @@ -11967,26 +11277,6 @@ "node": ">= 8" } }, - "node_modules/minipass-flush/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-flush/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, "node_modules/minipass-pipeline": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", @@ -12000,26 +11290,6 @@ "node": ">=8" } }, - "node_modules/minipass-pipeline/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-pipeline/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": 
"sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, "node_modules/minipass-sized": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", @@ -12033,20 +11303,7 @@ "node": ">=8" } }, - "node_modules/minipass-sized/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-sized/node_modules/yallist": { + "node_modules/minipass/node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", @@ -12054,18 +11311,26 @@ "license": "ISC" }, "node_modules/minizlib": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz", - "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", "dev": true, "license": "MIT", "dependencies": { - "minipass": "^7.1.2" + "minipass": "^3.0.0", + "yallist": "^4.0.0" }, "engines": { - "node": ">= 18" + "node": ">= 8" } }, + "node_modules/minizlib/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, "node_modules/mkdirp": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", @@ -12166,9 +11431,9 @@ "license": "MIT" }, "node_modules/negotiator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", - "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", "dev": true, "license": "MIT", "engines": { @@ -12176,16 +11441,16 @@ } }, "node_modules/node-abi": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-4.24.0.tgz", - "integrity": "sha512-u2EC1CeNe25uVtX3EZbdQ275c74zdZmmpzrHEQh2aIYqoVjlglfUpOX9YY85x1nlBydEKDVaSmMNhR7N82Qj8A==", + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz", + "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==", "dev": true, "license": "MIT", "dependencies": { - "semver": "^7.6.3" + "semver": "^7.3.5" }, "engines": { - "node": ">=22.12.0" + "node": ">=10" } }, "node_modules/node-addon-api": { @@ -12206,94 +11471,6 @@ "semver": "^7.3.5" } }, - "node_modules/node-gyp": { - "version": "11.5.0", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-11.5.0.tgz", - "integrity": 
"sha512-ra7Kvlhxn5V9Slyus0ygMa2h+UqExPqUIkfk7Pc8QTLT956JLSy51uWFwHtIYy0vI8cB4BDhc/S03+880My/LQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "env-paths": "^2.2.0", - "exponential-backoff": "^3.1.1", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^14.0.3", - "nopt": "^8.0.0", - "proc-log": "^5.0.0", - "semver": "^7.3.5", - "tar": "^7.4.3", - "tinyglobby": "^0.2.12", - "which": "^5.0.0" - }, - "bin": { - "node-gyp": "bin/node-gyp.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/node-gyp/node_modules/chownr": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", - "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, - "node_modules/node-gyp/node_modules/isexe": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz", - "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=16" - } - }, - "node_modules/node-gyp/node_modules/tar": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", - "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.1.0", - "yallist": "^5.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/node-gyp/node_modules/which": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/which/-/which-5.0.0.tgz", - "integrity": "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "isexe": "^3.1.1" - }, - "bin": { - "node-which": "bin/which.js" - }, - "engines": { - "node": "^18.17.0 || >=20.5.0" - } - }, - "node_modules/node-gyp/node_modules/yallist": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", - "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", - "dev": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, "node_modules/node-releases": { "version": "2.0.27", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", @@ -12302,19 +11479,19 @@ "license": "MIT" }, "node_modules/nopt": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz", - "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz", + "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", "dev": true, "license": "ISC", "dependencies": { - "abbrev": "^3.0.0" + "abbrev": "^1.0.0" }, "bin": { "nopt": "bin/nopt.js" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/normalize-url": { @@ -12330,6 +11507,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/nwsapi": { + "version": "2.2.23", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.23.tgz", + "integrity": 
"sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==", + "dev": true, + "license": "MIT" + }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -12460,16 +11644,16 @@ } }, "node_modules/onetime": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", - "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, "license": "MIT", "dependencies": { - "mimic-function": "^5.0.0" + "mimic-fn": "^2.1.0" }, "engines": { - "node": ">=18" + "node": ">=6" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -12517,69 +11701,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ora/node_modules/cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "license": "MIT", - "dependencies": { - "restore-cursor": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ora/node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ora/node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/ora/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/own-keys": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", @@ -12641,13 +11762,16 @@ } }, "node_modules/p-map": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz", - "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": 
"sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", "dev": true, "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, "engines": { - "node": ">=18" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -12699,9 +11823,9 @@ "license": "MIT" }, "node_modules/parse5": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", - "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", "dev": true, "license": "MIT", "dependencies": { @@ -12772,6 +11896,16 @@ "dev": true, "license": "ISC" }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/pathe": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", @@ -12809,13 +11943,13 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { - "node": ">=8.6" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" @@ -13010,14 +12144,22 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/pretty-format/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT", + "peer": true + }, "node_modules/proc-log": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz", - "integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz", + "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==", "dev": true, "license": "ISC", "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/progress": { @@ -13063,13 +12205,6 @@ "react-is": "^16.13.1" } }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "dev": true, - "license": "MIT" - }, "node_modules/property-information": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", @@ -13136,12 +12271,12 @@ } }, 
"node_modules/react-i18next": { - "version": "16.5.1", - "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.1.tgz", - "integrity": "sha512-Hks6UIRZWW4c+qDAnx1csVsCGYeIR4MoBGQgJ+NUoNnO6qLxXuf8zu0xdcinyXUORgGzCdRsexxO1Xzv3sTdnw==", + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.0.tgz", + "integrity": "sha512-IMpPTyCTKxEj8klCrLKUTIUa8uYTd851+jcu2fJuUB9Agkk9Qq8asw4omyeHVnOXHrLgQJGTm5zTvn8HpaPiqw==", "license": "MIT", "dependencies": { - "@babel/runtime": "^7.28.4", + "@babel/runtime": "^7.27.6", "html-parse-stringify": "^3.0.1", "use-sync-external-store": "^1.6.0" }, @@ -13163,12 +12298,11 @@ } }, "node_modules/react-is": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", - "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "dev": true, - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/react-markdown": { "version": "10.1.0", @@ -13255,13 +12389,13 @@ } }, "node_modules/react-resizable-panels": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-4.2.0.tgz", - "integrity": "sha512-X/WbnyT/bgx09KEGvtJvaTr3axRrcBGcJdELIoGXZipCxc2hPwFsH/pfpVgwNVq5LpQxF/E5pPXGTQdjBnidPw==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-3.0.6.tgz", + "integrity": "sha512-b3qKHQ3MLqOgSS+FRYKapNkJZf5EQzuf6+RLiq1/IlTHw99YrZ2NJZLk4hQIzTnnIkRg2LUqyVinu6YWWpUYew==", "license": "MIT", "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" + "react": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "node_modules/react-style-singleton": { @@ -13327,6 +12461,20 @@ "url": "https://paulmillr.com/funding/" } }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", @@ -13447,16 +12595,6 @@ "node": ">=0.10.0" } }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/resedit": { "version": "1.7.2", "resolved": "https://registry.npmjs.org/resedit/-/resedit-1.7.2.tgz", @@ -13524,20 +12662,17 @@ } }, "node_modules/restore-cursor": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", - "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "version": "3.1.0", + 
"resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", "dev": true, "license": "MIT", "dependencies": { - "onetime": "^7.0.0", - "signal-exit": "^4.1.0" + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" }, "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/retry": { @@ -13558,18 +12693,55 @@ "license": "MIT" }, "node_modules/rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, "license": "ISC", - "peer": true, "dependencies": { "glob": "^7.1.3" }, "bin": { "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" } }, "node_modules/roarr": { @@ -13592,9 +12764,9 @@ } }, "node_modules/rollup": { - "version": "4.54.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz", - "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==", + "version": "4.53.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.4.tgz", + "integrity": "sha512-YpXaaArg0MvrnJpvduEDYIp7uGOqKXbH9NsHGQ6SxKCOsNAjZF018MmxefFUulVP2KLtiGw1UvZbr+/ekjvlDg==", "dev": true, "license": "MIT", "dependencies": { @@ -13608,31 +12780,38 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.54.0", - "@rollup/rollup-android-arm64": "4.54.0", - "@rollup/rollup-darwin-arm64": "4.54.0", - "@rollup/rollup-darwin-x64": "4.54.0", - "@rollup/rollup-freebsd-arm64": "4.54.0", - "@rollup/rollup-freebsd-x64": "4.54.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.54.0", - "@rollup/rollup-linux-arm-musleabihf": "4.54.0", - "@rollup/rollup-linux-arm64-gnu": "4.54.0", - "@rollup/rollup-linux-arm64-musl": "4.54.0", - "@rollup/rollup-linux-loong64-gnu": "4.54.0", - "@rollup/rollup-linux-ppc64-gnu": "4.54.0", - "@rollup/rollup-linux-riscv64-gnu": "4.54.0", - "@rollup/rollup-linux-riscv64-musl": "4.54.0", - "@rollup/rollup-linux-s390x-gnu": "4.54.0", 
- "@rollup/rollup-linux-x64-gnu": "4.54.0", - "@rollup/rollup-linux-x64-musl": "4.54.0", - "@rollup/rollup-openharmony-arm64": "4.54.0", - "@rollup/rollup-win32-arm64-msvc": "4.54.0", - "@rollup/rollup-win32-ia32-msvc": "4.54.0", - "@rollup/rollup-win32-x64-gnu": "4.54.0", - "@rollup/rollup-win32-x64-msvc": "4.54.0", + "@rollup/rollup-android-arm-eabi": "4.53.4", + "@rollup/rollup-android-arm64": "4.53.4", + "@rollup/rollup-darwin-arm64": "4.53.4", + "@rollup/rollup-darwin-x64": "4.53.4", + "@rollup/rollup-freebsd-arm64": "4.53.4", + "@rollup/rollup-freebsd-x64": "4.53.4", + "@rollup/rollup-linux-arm-gnueabihf": "4.53.4", + "@rollup/rollup-linux-arm-musleabihf": "4.53.4", + "@rollup/rollup-linux-arm64-gnu": "4.53.4", + "@rollup/rollup-linux-arm64-musl": "4.53.4", + "@rollup/rollup-linux-loong64-gnu": "4.53.4", + "@rollup/rollup-linux-ppc64-gnu": "4.53.4", + "@rollup/rollup-linux-riscv64-gnu": "4.53.4", + "@rollup/rollup-linux-riscv64-musl": "4.53.4", + "@rollup/rollup-linux-s390x-gnu": "4.53.4", + "@rollup/rollup-linux-x64-gnu": "4.53.4", + "@rollup/rollup-linux-x64-musl": "4.53.4", + "@rollup/rollup-openharmony-arm64": "4.53.4", + "@rollup/rollup-win32-arm64-msvc": "4.53.4", + "@rollup/rollup-win32-ia32-msvc": "4.53.4", + "@rollup/rollup-win32-x64-gnu": "4.53.4", + "@rollup/rollup-win32-x64-msvc": "4.53.4", "fsevents": "~2.3.2" } }, + "node_modules/rrweb-cssom": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", + "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", + "dev": true, + "license": "MIT" + }, "node_modules/safe-array-concat": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", @@ -13944,17 +13123,11 @@ "license": "ISC" }, "node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true, - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } + "license": "ISC" }, "node_modules/simple-update-notifier": { "version": "2.0.0", @@ -14012,18 +13185,31 @@ } }, "node_modules/socks-proxy-agent": { - "version": "8.0.5", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", - "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", "dev": true, "license": "MIT", "dependencies": { - "agent-base": "^7.1.2", - "debug": "^4.3.4", - "socks": "^2.8.3" + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" }, "engines": { - "node": ">= 14" + "node": ">= 10" + } + }, + "node_modules/socks-proxy-agent/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": 
"sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" } }, "node_modules/source-map": { @@ -14076,16 +13262,16 @@ "optional": true }, "node_modules/ssri": { - "version": "12.0.0", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-12.0.0.tgz", - "integrity": "sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", "dev": true, "license": "ISC", "dependencies": { - "minipass": "^7.0.3" + "minipass": "^3.1.1" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/stackback": { @@ -14177,32 +13363,6 @@ "node": ">=8" } }, - "node_modules/string-width-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/string.prototype.matchall": { "version": "4.0.12", "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", @@ -14316,19 +13476,16 @@ } }, "node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, "license": "MIT", "dependencies": { - "ansi-regex": "^6.0.1" + "ansi-regex": "^5.0.1" }, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" + "node": ">=8" } }, "node_modules/strip-ansi-cjs": { @@ -14345,17 +13502,17 @@ "node": ">=8" } }, - "node_modules/strip-ansi/node_modules/ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", "dev": true, "license": "MIT", - "engines": { - "node": ">=12" + "dependencies": { + "min-indent": "^1.0.0" }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" + "engines": { + "node": ">=8" } }, "node_modules/strip-json-comments": { @@ -14470,78 +13627,25 @@ 
"resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", "dev": true, - "license": "ISC", - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/tar/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dev": true, - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/tar/node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/tar/node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=8" - } - }, - "node_modules/tar/node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "license": "MIT", + "license": "ISC", "dependencies": { - "minipass": "^3.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", "yallist": "^4.0.0" }, "engines": { - "node": ">= 8" + "node": ">=10" } }, - "node_modules/tar/node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", "dev": true, "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, "engines": { "node": ">=8" } @@ -14579,42 +13683,41 @@ "fs-extra": "^10.0.0" } }, - "node_modules/temp-file/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "node_modules/temp/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, - "license": "MIT", + "license": "ISC", + "peer": true, "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" + "fs.realpath": "^1.0.0", + 
"inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" }, "engines": { - "node": ">=12" + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/temp-file/node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "node_modules/temp/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "MIT", + "license": "ISC", + "peer": true, "dependencies": { - "universalify": "^2.0.0" + "brace-expansion": "^1.1.7" }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/temp-file/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", "engines": { - "node": ">= 10.0.0" + "node": "*" } }, "node_modules/temp/node_modules/mkdirp": { @@ -14631,6 +13734,21 @@ "mkdirp": "bin/cmd.js" } }, + "node_modules/temp/node_modules/rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, "node_modules/tiny-async-pool": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/tiny-async-pool/-/tiny-async-pool-1.3.0.tgz", @@ -14691,37 +13809,6 @@ "url": "https://github.com/sponsors/SuperchupuDev" } }, - "node_modules/tinyglobby/node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/tinyrainbow": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", @@ -14733,22 +13820,22 @@ } }, "node_modules/tldts": { - "version": "7.0.19", - "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz", - "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==", + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.86.tgz", + "integrity": 
"sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ==", "dev": true, "license": "MIT", "dependencies": { - "tldts-core": "^7.0.19" + "tldts-core": "^6.1.86" }, "bin": { "tldts": "bin/cli.js" } }, "node_modules/tldts-core": { - "version": "7.0.19", - "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz", - "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==", + "version": "6.1.86", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.86.tgz", + "integrity": "sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==", "dev": true, "license": "MIT" }, @@ -14786,29 +13873,29 @@ } }, "node_modules/tough-cookie": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", - "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.1.2.tgz", + "integrity": "sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==", "dev": true, "license": "BSD-3-Clause", "dependencies": { - "tldts": "^7.0.5" + "tldts": "^6.1.32" }, "engines": { "node": ">=16" } }, "node_modules/tr46": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", - "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", + "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", "dev": true, "license": "MIT", "dependencies": { "punycode": "^2.3.1" }, "engines": { - "node": ">=20" + "node": ">=18" } }, "node_modules/trim-lines": { @@ -14842,9 +13929,9 @@ } }, "node_modules/ts-api-utils": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.3.0.tgz", - "integrity": "sha512-6eg3Y9SF7SsAvGzRHQvvc1skDAhwI4YQ32ui1scxD1Ccr0G5qIIbUBT3pFTKX8kmWIQClHobtUdNuaBgwdfdWg==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", "dev": true, "license": "MIT", "engines": { @@ -14980,16 +14067,16 @@ } }, "node_modules/typescript-eslint": { - "version": "8.51.0", - "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.51.0.tgz", - "integrity": "sha512-jh8ZuM5oEh2PSdyQG9YAEM1TCGuWenLSuSUhf/irbVUNW9O5FhbFVONviN2TgMTBnUmyHv7E56rYnfLZK6TkiA==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.49.0.tgz", + "integrity": "sha512-zRSVH1WXD0uXczCXw+nsdjGPUdx4dfrs5VQoHnUWmv1U3oNlAKv4FUNdLDhVUg+gYn+a5hUESqch//Rv5wVhrg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/eslint-plugin": "8.51.0", - "@typescript-eslint/parser": "8.51.0", - "@typescript-eslint/typescript-estree": "8.51.0", - "@typescript-eslint/utils": "8.51.0" + "@typescript-eslint/eslint-plugin": "8.49.0", + "@typescript-eslint/parser": "8.49.0", + "@typescript-eslint/typescript-estree": "8.49.0", + "@typescript-eslint/utils": "8.49.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -15049,29 +14136,29 @@ } }, "node_modules/unique-filename": { - 
"version": "4.0.0", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-4.0.0.tgz", - "integrity": "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz", + "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", "dev": true, "license": "ISC", "dependencies": { - "unique-slug": "^5.0.0" + "unique-slug": "^3.0.0" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/unique-slug": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-5.0.0.tgz", - "integrity": "sha512-9OdaqO5kwqR+1kVgHAhsp5vPNU0hnxRa26rBFNfNgM7M6pNtgzeBn3s/xbyCQL3dcjzOatcef6UUHpB/6MaETg==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz", + "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", "dev": true, "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4" }, "engines": { - "node": "^18.17.0 || >=20.5.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/unist-util-is": { @@ -15143,19 +14230,18 @@ } }, "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "dev": true, + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", "license": "MIT", "engines": { - "node": ">= 4.0.0" + "node": ">= 10.0.0" } }, "node_modules/update-browserslist-db": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", - "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.2.tgz", + "integrity": "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA==", "dev": true, "funding": [ { @@ -15391,9 +14477,9 @@ } }, "node_modules/vite/node_modules/@esbuild/aix-ppc64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", - "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.1.tgz", + "integrity": "sha512-HHB50pdsBX6k47S4u5g/CaLjqS3qwaOVE5ILsq64jyzgMhLuCuZ8rGzM9yhsAjfjkbgUPMzZEPa7DAp7yz6vuA==", "cpu": [ "ppc64" ], @@ -15408,9 +14494,9 @@ } }, "node_modules/vite/node_modules/@esbuild/android-arm": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", - "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.1.tgz", + "integrity": "sha512-kFqa6/UcaTbGm/NncN9kzVOODjhZW8e+FRdSeypWe6j33gzclHtwlANs26JrupOntlcWmB0u8+8HZo8s7thHvg==", "cpu": [ "arm" 
], @@ -15425,9 +14511,9 @@ } }, "node_modules/vite/node_modules/@esbuild/android-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", - "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.1.tgz", + "integrity": "sha512-45fuKmAJpxnQWixOGCrS+ro4Uvb4Re9+UTieUY2f8AEc+t7d4AaZ6eUJ3Hva7dtrxAAWHtlEFsXFMAgNnGU9uQ==", "cpu": [ "arm64" ], @@ -15442,9 +14528,9 @@ } }, "node_modules/vite/node_modules/@esbuild/android-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", - "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.1.tgz", + "integrity": "sha512-LBEpOz0BsgMEeHgenf5aqmn/lLNTFXVfoWMUox8CtWWYK9X4jmQzWjoGoNb8lmAYml/tQ/Ysvm8q7szu7BoxRQ==", "cpu": [ "x64" ], @@ -15459,9 +14545,9 @@ } }, "node_modules/vite/node_modules/@esbuild/darwin-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", - "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.1.tgz", + "integrity": "sha512-veg7fL8eMSCVKL7IW4pxb54QERtedFDfY/ASrumK/SbFsXnRazxY4YykN/THYqFnFwJ0aVjiUrVG2PwcdAEqQQ==", "cpu": [ "arm64" ], @@ -15476,9 +14562,9 @@ } }, "node_modules/vite/node_modules/@esbuild/darwin-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", - "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.1.tgz", + "integrity": "sha512-+3ELd+nTzhfWb07Vol7EZ+5PTbJ/u74nC6iv4/lwIU99Ip5uuY6QoIf0Hn4m2HoV0qcnRivN3KSqc+FyCHjoVQ==", "cpu": [ "x64" ], @@ -15493,9 +14579,9 @@ } }, "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", - "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.1.tgz", + "integrity": "sha512-/8Rfgns4XD9XOSXlzUDepG8PX+AVWHliYlUkFI3K3GB6tqbdjYqdhcb4BKRd7C0BhZSoaCxhv8kTcBrcZWP+xg==", "cpu": [ "arm64" ], @@ -15510,9 +14596,9 @@ } }, "node_modules/vite/node_modules/@esbuild/freebsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", - "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.1.tgz", + "integrity": "sha512-GITpD8dK9C+r+5yRT/UKVT36h/DQLOHdwGVwwoHidlnA168oD3uxA878XloXebK4Ul3gDBBIvEdL7go9gCUFzQ==", "cpu": [ "x64" ], @@ -15527,9 +14613,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-arm": { - "version": "0.27.2", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", - "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.1.tgz", + "integrity": "sha512-ieMID0JRZY/ZeCrsFQ3Y3NlHNCqIhTprJfDgSB3/lv5jJZ8FX3hqPyXWhe+gvS5ARMBJ242PM+VNz/ctNj//eA==", "cpu": [ "arm" ], @@ -15544,9 +14630,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", - "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.1.tgz", + "integrity": "sha512-W9//kCrh/6in9rWIBdKaMtuTTzNj6jSeG/haWBADqLLa9P8O5YSRDzgD5y9QBok4AYlzS6ARHifAb75V6G670Q==", "cpu": [ "arm64" ], @@ -15561,9 +14647,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-ia32": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", - "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.1.tgz", + "integrity": "sha512-VIUV4z8GD8rtSVMfAj1aXFahsi/+tcoXXNYmXgzISL+KB381vbSTNdeZHHHIYqFyXcoEhu9n5cT+05tRv13rlw==", "cpu": [ "ia32" ], @@ -15578,9 +14664,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-loong64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", - "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.1.tgz", + "integrity": "sha512-l4rfiiJRN7sTNI//ff65zJ9z8U+k6zcCg0LALU5iEWzY+a1mVZ8iWC1k5EsNKThZ7XCQ6YWtsZ8EWYm7r1UEsg==", "cpu": [ "loong64" ], @@ -15595,9 +14681,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-mips64el": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", - "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.1.tgz", + "integrity": "sha512-U0bEuAOLvO/DWFdygTHWY8C067FXz+UbzKgxYhXC0fDieFa0kDIra1FAhsAARRJbvEyso8aAqvPdNxzWuStBnA==", "cpu": [ "mips64el" ], @@ -15612,9 +14698,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-ppc64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", - "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.1.tgz", + "integrity": "sha512-NzdQ/Xwu6vPSf/GkdmRNsOfIeSGnh7muundsWItmBsVpMoNPVpM61qNzAVY3pZ1glzzAxLR40UyYM23eaDDbYQ==", "cpu": [ "ppc64" ], @@ -15629,9 +14715,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-riscv64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", - "integrity": 
"sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.1.tgz", + "integrity": "sha512-7zlw8p3IApcsN7mFw0O1Z1PyEk6PlKMu18roImfl3iQHTnr/yAfYv6s4hXPidbDoI2Q0pW+5xeoM4eTCC0UdrQ==", "cpu": [ "riscv64" ], @@ -15646,9 +14732,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-s390x": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", - "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.1.tgz", + "integrity": "sha512-cGj5wli+G+nkVQdZo3+7FDKC25Uh4ZVwOAK6A06Hsvgr8WqBBuOy/1s+PUEd/6Je+vjfm6stX0kmib5b/O2Ykw==", "cpu": [ "s390x" ], @@ -15663,9 +14749,9 @@ } }, "node_modules/vite/node_modules/@esbuild/linux-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", - "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.1.tgz", + "integrity": "sha512-z3H/HYI9MM0HTv3hQZ81f+AKb+yEoCRlUby1F80vbQ5XdzEMyY/9iNlAmhqiBKw4MJXwfgsh7ERGEOhrM1niMA==", "cpu": [ "x64" ], @@ -15680,9 +14766,9 @@ } }, "node_modules/vite/node_modules/@esbuild/netbsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", - "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.1.tgz", + "integrity": "sha512-wzC24DxAvk8Em01YmVXyjl96Mr+ecTPyOuADAvjGg+fyBpGmxmcr2E5ttf7Im8D0sXZihpxzO1isus8MdjMCXQ==", "cpu": [ "arm64" ], @@ -15697,9 +14783,9 @@ } }, "node_modules/vite/node_modules/@esbuild/netbsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", - "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.1.tgz", + "integrity": "sha512-1YQ8ybGi2yIXswu6eNzJsrYIGFpnlzEWRl6iR5gMgmsrR0FcNoV1m9k9sc3PuP5rUBLshOZylc9nqSgymI+TYg==", "cpu": [ "x64" ], @@ -15714,9 +14800,9 @@ } }, "node_modules/vite/node_modules/@esbuild/openbsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", - "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.1.tgz", + "integrity": "sha512-5Z+DzLCrq5wmU7RDaMDe2DVXMRm2tTDvX2KU14JJVBN2CT/qov7XVix85QoJqHltpvAOZUAc3ndU56HSMWrv8g==", "cpu": [ "arm64" ], @@ -15731,9 +14817,9 @@ } }, "node_modules/vite/node_modules/@esbuild/openbsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", - "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + "version": "0.27.1", + "resolved": 
"https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.1.tgz", + "integrity": "sha512-Q73ENzIdPF5jap4wqLtsfh8YbYSZ8Q0wnxplOlZUOyZy7B4ZKW8DXGWgTCZmF8VWD7Tciwv5F4NsRf6vYlZtqg==", "cpu": [ "x64" ], @@ -15748,9 +14834,9 @@ } }, "node_modules/vite/node_modules/@esbuild/openharmony-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", - "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.1.tgz", + "integrity": "sha512-ajbHrGM/XiK+sXM0JzEbJAen+0E+JMQZ2l4RR4VFwvV9JEERx+oxtgkpoKv1SevhjavK2z2ReHk32pjzktWbGg==", "cpu": [ "arm64" ], @@ -15765,9 +14851,9 @@ } }, "node_modules/vite/node_modules/@esbuild/sunos-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", - "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.1.tgz", + "integrity": "sha512-IPUW+y4VIjuDVn+OMzHc5FV4GubIwPnsz6ubkvN8cuhEqH81NovB53IUlrlBkPMEPxvNnf79MGBoz8rZ2iW8HA==", "cpu": [ "x64" ], @@ -15782,9 +14868,9 @@ } }, "node_modules/vite/node_modules/@esbuild/win32-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", - "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.1.tgz", + "integrity": "sha512-RIVRWiljWA6CdVu8zkWcRmGP7iRRIIwvhDKem8UMBjPql2TXM5PkDVvvrzMtj1V+WFPB4K7zkIGM7VzRtFkjdg==", "cpu": [ "arm64" ], @@ -15799,9 +14885,9 @@ } }, "node_modules/vite/node_modules/@esbuild/win32-ia32": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", - "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.1.tgz", + "integrity": "sha512-2BR5M8CPbptC1AK5JbJT1fWrHLvejwZidKx3UMSF0ecHMa+smhi16drIrCEggkgviBwLYd5nwrFLSl5Kho96RQ==", "cpu": [ "ia32" ], @@ -15816,9 +14902,9 @@ } }, "node_modules/vite/node_modules/@esbuild/win32-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", - "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.1.tgz", + "integrity": "sha512-d5X6RMYv6taIymSk8JBP+nxv8DQAMY6A51GPgusqLdK9wBz5wWIXy1KjTck6HnjE9hqJzJRdk+1p/t5soSbCtw==", "cpu": [ "x64" ], @@ -15833,9 +14919,9 @@ } }, "node_modules/vite/node_modules/esbuild": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", - "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", + "version": "0.27.1", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.1.tgz", + "integrity": "sha512-yY35KZckJJuVVPXpvjgxiCuVEJT67F6zDeVTv4rizyPrfGBUpZQsvmxnN+C371c2esD/hNMjj4tpBhuueLN7aA==", "dev": true, "hasInstallScript": true, "license": 
"MIT", @@ -15846,50 +14932,32 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.27.2", - "@esbuild/android-arm": "0.27.2", - "@esbuild/android-arm64": "0.27.2", - "@esbuild/android-x64": "0.27.2", - "@esbuild/darwin-arm64": "0.27.2", - "@esbuild/darwin-x64": "0.27.2", - "@esbuild/freebsd-arm64": "0.27.2", - "@esbuild/freebsd-x64": "0.27.2", - "@esbuild/linux-arm": "0.27.2", - "@esbuild/linux-arm64": "0.27.2", - "@esbuild/linux-ia32": "0.27.2", - "@esbuild/linux-loong64": "0.27.2", - "@esbuild/linux-mips64el": "0.27.2", - "@esbuild/linux-ppc64": "0.27.2", - "@esbuild/linux-riscv64": "0.27.2", - "@esbuild/linux-s390x": "0.27.2", - "@esbuild/linux-x64": "0.27.2", - "@esbuild/netbsd-arm64": "0.27.2", - "@esbuild/netbsd-x64": "0.27.2", - "@esbuild/openbsd-arm64": "0.27.2", - "@esbuild/openbsd-x64": "0.27.2", - "@esbuild/openharmony-arm64": "0.27.2", - "@esbuild/sunos-x64": "0.27.2", - "@esbuild/win32-arm64": "0.27.2", - "@esbuild/win32-ia32": "0.27.2", - "@esbuild/win32-x64": "0.27.2" - } - }, - "node_modules/vite/node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } + "@esbuild/aix-ppc64": "0.27.1", + "@esbuild/android-arm": "0.27.1", + "@esbuild/android-arm64": "0.27.1", + "@esbuild/android-x64": "0.27.1", + "@esbuild/darwin-arm64": "0.27.1", + "@esbuild/darwin-x64": "0.27.1", + "@esbuild/freebsd-arm64": "0.27.1", + "@esbuild/freebsd-x64": "0.27.1", + "@esbuild/linux-arm": "0.27.1", + "@esbuild/linux-arm64": "0.27.1", + "@esbuild/linux-ia32": "0.27.1", + "@esbuild/linux-loong64": "0.27.1", + "@esbuild/linux-mips64el": "0.27.1", + "@esbuild/linux-ppc64": "0.27.1", + "@esbuild/linux-riscv64": "0.27.1", + "@esbuild/linux-s390x": "0.27.1", + "@esbuild/linux-x64": "0.27.1", + "@esbuild/netbsd-arm64": "0.27.1", + "@esbuild/netbsd-x64": "0.27.1", + "@esbuild/openbsd-arm64": "0.27.1", + "@esbuild/openbsd-x64": "0.27.1", + "@esbuild/openharmony-arm64": "0.27.1", + "@esbuild/sunos-x64": "0.27.1", + "@esbuild/win32-arm64": "0.27.1", + "@esbuild/win32-ia32": "0.27.1", + "@esbuild/win32-x64": "0.27.1" } }, "node_modules/vite/node_modules/fsevents": { @@ -15907,33 +14975,20 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, - "node_modules/vite/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/vitest": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz", - "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==", + "version": "4.0.15", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.15.tgz", + "integrity": "sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/expect": "4.0.16", - "@vitest/mocker": "4.0.16", - "@vitest/pretty-format": "4.0.16", - "@vitest/runner": 
"4.0.16", - "@vitest/snapshot": "4.0.16", - "@vitest/spy": "4.0.16", - "@vitest/utils": "4.0.16", + "@vitest/expect": "4.0.15", + "@vitest/mocker": "4.0.15", + "@vitest/pretty-format": "4.0.15", + "@vitest/runner": "4.0.15", + "@vitest/snapshot": "4.0.15", + "@vitest/spy": "4.0.15", + "@vitest/utils": "4.0.15", "es-module-lexer": "^1.7.0", "expect-type": "^1.2.2", "magic-string": "^0.30.21", @@ -15961,10 +15016,10 @@ "@edge-runtime/vm": "*", "@opentelemetry/api": "^1.9.0", "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", - "@vitest/browser-playwright": "4.0.16", - "@vitest/browser-preview": "4.0.16", - "@vitest/browser-webdriverio": "4.0.16", - "@vitest/ui": "4.0.16", + "@vitest/browser-playwright": "4.0.15", + "@vitest/browser-preview": "4.0.15", + "@vitest/browser-webdriverio": "4.0.15", + "@vitest/ui": "4.0.15", "happy-dom": "*", "jsdom": "*" }, @@ -15998,19 +15053,6 @@ } } }, - "node_modules/vitest/node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/void-elements": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", @@ -16044,13 +15086,26 @@ } }, "node_modules/webidl-conversions": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz", - "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", "dev": true, "license": "BSD-2-Clause", "engines": { - "node": ">=20" + "node": ">=12" + } + }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" } }, "node_modules/whatwg-mimetype": { @@ -16064,17 +15119,17 @@ } }, "node_modules/whatwg-url": { - "version": "15.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz", - "integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==", + "version": "14.2.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", + "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", "dev": true, "license": "MIT", "dependencies": { - "tr46": "^6.0.0", - "webidl-conversions": "^8.0.0" + "tr46": "^5.1.0", + "webidl-conversions": "^7.0.0" }, "engines": { - "node": ">=20" + "node": ">=18" } }, "node_modules/which": { @@ -16210,18 +15265,18 @@ } }, "node_modules/wrap-ansi": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", - "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "version": "7.0.0", + "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dev": true, "license": "MIT", "dependencies": { - "ansi-styles": "^6.2.1", - "string-width": "^7.0.0", - "strip-ansi": "^7.1.0" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=18" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/wrap-ansi?sponsor=1" @@ -16246,57 +15301,6 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/emoji-regex": { - "version": "10.6.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", - "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -16440,9 +15444,10 @@ } }, "node_modules/zod": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.4.tgz", - "integrity": "sha512-Zw/uYiiyF6pUT1qmKbZziChgNPRu+ZRneAsMUDU6IwmXdWt5JwcUfy2bvLOCUtz5UniaN/Zx5aFttZYbYc7O/A==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.2.0.tgz", + "integrity": "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw==", + "dev": true, "license": "MIT", "funding": { "url": "https://github.com/sponsors/colinhacks" diff --git a/apps/frontend/package.json b/apps/frontend/package.json index 1561b64046..3b9e8bda37 100644 --- a/apps/frontend/package.json +++ b/apps/frontend/package.json @@ -48,6 +48,7 @@ "typecheck": "tsc --noEmit" }, "dependencies": { + "@anthropic-ai/sdk": "^0.71.2", "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", "@dnd-kit/utilities": "^3.2.2", @@ -68,6 +69,7 @@ "@radix-ui/react-tabs": "^1.1.13", "@radix-ui/react-toast": "^1.2.15", "@radix-ui/react-tooltip": "^1.2.8", + "@sentry/electron": "^7.5.0", "@tailwindcss/typography": "^0.5.19", "@tanstack/react-virtual": "^3.13.13", "@xterm/addon-fit": "^0.11.0", @@ 
-78,11 +80,14 @@ "chokidar": "^5.0.0", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", + "dotenv": "^16.6.1", "electron-log": "^5.4.3", "electron-updater": "^6.6.2", "i18next": "^25.7.3", "lucide-react": "^0.562.0", + "minimatch": "^10.1.1", "motion": "^12.23.26", + "proper-lockfile": "^4.1.2", "react": "^19.2.3", "react-dom": "^19.2.3", "react-i18next": "^16.5.0", @@ -102,7 +107,9 @@ "@eslint/js": "^9.39.1", "@playwright/test": "^1.52.0", "@tailwindcss/postcss": "^4.1.17", + "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.1.0", + "@types/minimatch": "^5.1.2", "@types/node": "^25.0.0", "@types/react": "^19.2.7", "@types/react-dom": "^19.2.3", @@ -111,7 +118,7 @@ "@vitejs/plugin-react": "^5.1.2", "autoprefixer": "^10.4.22", "cross-env": "^10.1.0", - "electron": "^39.2.7", + "electron": "39.2.7", "electron-builder": "^26.0.12", "electron-vite": "^5.0.0", "eslint": "^9.39.1", @@ -207,7 +214,7 @@ ] }, "linux": { - "icon": "resources/icon.png", + "icon": "resources/icons", "target": [ "AppImage", "deb", diff --git a/apps/frontend/resources/icons/128x128.png b/apps/frontend/resources/icons/128x128.png new file mode 100644 index 0000000000..7e694b434c Binary files /dev/null and b/apps/frontend/resources/icons/128x128.png differ diff --git a/apps/frontend/resources/icons/16x16.png b/apps/frontend/resources/icons/16x16.png new file mode 100644 index 0000000000..bc533838b6 Binary files /dev/null and b/apps/frontend/resources/icons/16x16.png differ diff --git a/apps/frontend/resources/icons/256x256.png b/apps/frontend/resources/icons/256x256.png new file mode 100644 index 0000000000..555230d363 Binary files /dev/null and b/apps/frontend/resources/icons/256x256.png differ diff --git a/apps/frontend/resources/icons/32x32.png b/apps/frontend/resources/icons/32x32.png new file mode 100644 index 0000000000..227e6db694 Binary files /dev/null and b/apps/frontend/resources/icons/32x32.png differ diff --git a/apps/frontend/resources/icons/48x48.png b/apps/frontend/resources/icons/48x48.png new file mode 100644 index 0000000000..29e6b3bc03 Binary files /dev/null and b/apps/frontend/resources/icons/48x48.png differ diff --git a/apps/frontend/resources/icons/512x512.png b/apps/frontend/resources/icons/512x512.png new file mode 100644 index 0000000000..22d476ffc1 Binary files /dev/null and b/apps/frontend/resources/icons/512x512.png differ diff --git a/apps/frontend/resources/icons/64x64.png b/apps/frontend/resources/icons/64x64.png new file mode 100644 index 0000000000..0068c05929 Binary files /dev/null and b/apps/frontend/resources/icons/64x64.png differ diff --git a/apps/frontend/scripts/download-python.cjs b/apps/frontend/scripts/download-python.cjs index 215af7db3c..86bc47b338 100644 --- a/apps/frontend/scripts/download-python.cjs +++ b/apps/frontend/scripts/download-python.cjs @@ -609,12 +609,12 @@ function installPackages(pythonBin, requirementsPath, targetSitePackages) { // Install packages directly to target directory // --no-compile: Don't create .pyc files (saves space, Python will work without them) - // --no-cache-dir: Don't use pip cache // --target: Install to specific directory + // Note: We intentionally DO use pip's cache to preserve built wheels for packages + // like real_ladybug that must be compiled from source on Intel Mac (no PyPI wheel) const pipArgs = [ '-m', 'pip', 'install', '--no-compile', - '--no-cache-dir', '--target', targetSitePackages, '-r', requirementsPath, ]; @@ -702,9 +702,32 @@ async function downloadPython(targetPlatform, targetArch, options = 
{}) { try { const version = verifyPythonBinary(pythonBin); console.log(`[download-python] Verified: ${version}`); - return { success: true, pythonPath: pythonBin, sitePackagesPath: sitePackagesDir }; - } catch { - console.log(`[download-python] Existing installation is broken, re-downloading...`); + + // Verify critical packages exist (fixes GitHub issue #416) + // Without this check, corrupted caches with missing packages would be accepted + // Note: Same list exists in python-env-manager.ts - keep them in sync + // This validation assumes traditional Python packages with __init__.py (not PEP 420 namespace packages) + const criticalPackages = ['claude_agent_sdk', 'dotenv']; + const missingPackages = criticalPackages.filter(pkg => { + const pkgPath = path.join(sitePackagesDir, pkg); + // Check both directory and __init__.py for more robust validation + const initFile = path.join(pkgPath, '__init__.py'); + return !fs.existsSync(pkgPath) || !fs.existsSync(initFile); + }); + + if (missingPackages.length > 0) { + console.log(`[download-python] Critical packages missing or incomplete: ${missingPackages.join(', ')}`); + console.log(`[download-python] Reinstalling packages...`); + // Remove site-packages to force reinstall, keep Python binary + // Flow continues below to re-install packages (skipPackages check at line 794) + fs.rmSync(sitePackagesDir, { recursive: true, force: true }); + } else { + console.log(`[download-python] All critical packages verified`); + return { success: true, pythonPath: pythonBin, sitePackagesPath: sitePackagesDir }; + } + } catch (err) { + const errorMsg = err instanceof Error ? err.message : String(err); + console.log(`[download-python] Existing installation is broken: ${errorMsg}`); fs.rmSync(platformDir, { recursive: true, force: true }); } } @@ -784,6 +807,22 @@ async function downloadPython(targetPlatform, targetArch, options = {}) { // Install packages installPackages(pythonBin, requirementsPath, sitePackagesDir); + // Verify critical packages were installed before creating marker (fixes #416) + // Note: Same list exists in python-env-manager.ts - keep them in sync + // This validation assumes traditional Python packages with __init__.py (not PEP 420 namespace packages) + const criticalPackages = ['claude_agent_sdk', 'dotenv']; + const postInstallMissing = criticalPackages.filter(pkg => { + const pkgPath = path.join(sitePackagesDir, pkg); + const initFile = path.join(pkgPath, '__init__.py'); + return !fs.existsSync(pkgPath) || !fs.existsSync(initFile); + }); + + if (postInstallMissing.length > 0) { + throw new Error(`Package installation failed - missing critical packages: ${postInstallMissing.join(', ')}`); + } + + console.log(`[download-python] All critical packages verified after installation`); + // Create marker file to indicate successful bundling fs.writeFileSync(packagesMarker, JSON.stringify({ bundledAt: new Date().toISOString(), diff --git a/apps/frontend/scripts/postinstall.cjs b/apps/frontend/scripts/postinstall.cjs index 41a8ebe645..e4c02e6dee 100644 --- a/apps/frontend/scripts/postinstall.cjs +++ b/apps/frontend/scripts/postinstall.cjs @@ -42,13 +42,36 @@ To install: ================================================================================ `; +/** + * Get electron version from package.json + */ +function getElectronVersion() { + const pkgPath = path.join(__dirname, '..', 'package.json'); + const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8')); + const electronVersion = pkg.devDependencies?.electron || pkg.dependencies?.electron; + if 
(!electronVersion) { + return null; + } + // Strip leading ^ or ~ from version + return electronVersion.replace(/^[\^~]/, ''); +} + /** * Run electron-rebuild */ function runElectronRebuild() { return new Promise((resolve, reject) => { const npx = isWindows ? 'npx.cmd' : 'npx'; - const child = spawn(npx, ['electron-rebuild'], { + const electronVersion = getElectronVersion(); + const args = ['electron-rebuild']; + + // Explicitly pass electron version if detected + if (electronVersion) { + args.push('-v', electronVersion); + console.log(`[postinstall] Using Electron version: ${electronVersion}`); + } + + const child = spawn(npx, args, { stdio: 'inherit', shell: isWindows, cwd: path.join(__dirname, '..'), @@ -70,12 +93,40 @@ function runElectronRebuild() { * Check if node-pty is already built */ function isNodePtyBuilt() { - const buildDir = path.join(__dirname, '..', 'node_modules', 'node-pty', 'build', 'Release'); - if (!fs.existsSync(buildDir)) return false; + // Check traditional node-pty build location (local node_modules) + const localBuildDir = path.join(__dirname, '..', 'node_modules', 'node-pty', 'build', 'Release'); + if (fs.existsSync(localBuildDir)) { + const files = fs.readdirSync(localBuildDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } + + // Check root node_modules (for npm workspaces) + const rootBuildDir = path.join(__dirname, '..', '..', '..', 'node_modules', 'node-pty', 'build', 'Release'); + if (fs.existsSync(rootBuildDir)) { + const files = fs.readdirSync(rootBuildDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } + + // Check for @lydell/node-pty with platform-specific prebuilts + const arch = os.arch(); + const platform = os.platform(); + const platformPkg = `@lydell/node-pty-${platform}-${arch}`; + + // Check local node_modules + const localLydellDir = path.join(__dirname, '..', 'node_modules', platformPkg); + if (fs.existsSync(localLydellDir)) { + const files = fs.readdirSync(localLydellDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } + + // Check root node_modules (for npm workspaces) + const rootLydellDir = path.join(__dirname, '..', '..', '..', 'node_modules', platformPkg); + if (fs.existsSync(rootLydellDir)) { + const files = fs.readdirSync(rootLydellDir); + if (files.some((f) => f.endsWith('.node'))) return true; + } - // Check for the main .node file - const files = fs.readdirSync(buildDir); - return files.some((f) => f.endsWith('.node')); + return false; } /** diff --git a/apps/frontend/src/__mocks__/electron.ts b/apps/frontend/src/__mocks__/electron.ts index 39f45801de..e5569f6893 100644 --- a/apps/frontend/src/__mocks__/electron.ts +++ b/apps/frontend/src/__mocks__/electron.ts @@ -56,7 +56,8 @@ export const ipcRenderer = { on: vi.fn(), once: vi.fn(), removeListener: vi.fn(), - removeAllListeners: vi.fn() + removeAllListeners: vi.fn(), + setMaxListeners: vi.fn() }; // Mock BrowserWindow @@ -125,6 +126,13 @@ export const nativeTheme = { on: vi.fn() }; +// Mock screen +export const screen = { + getPrimaryDisplay: vi.fn(() => ({ + workAreaSize: { width: 1920, height: 1080 } + })) +}; + export default { app, ipcMain, @@ -133,5 +141,6 @@ export default { dialog, contextBridge, shell, - nativeTheme + nativeTheme, + screen }; diff --git a/apps/frontend/src/__mocks__/sentry-electron-main.ts b/apps/frontend/src/__mocks__/sentry-electron-main.ts new file mode 100644 index 0000000000..697d392257 --- /dev/null +++ b/apps/frontend/src/__mocks__/sentry-electron-main.ts @@ -0,0 +1 @@ +export * from 
'./sentry-electron-shared'; diff --git a/apps/frontend/src/__mocks__/sentry-electron-renderer.ts b/apps/frontend/src/__mocks__/sentry-electron-renderer.ts new file mode 100644 index 0000000000..697d392257 --- /dev/null +++ b/apps/frontend/src/__mocks__/sentry-electron-renderer.ts @@ -0,0 +1 @@ +export * from './sentry-electron-shared'; diff --git a/apps/frontend/src/__mocks__/sentry-electron-shared.ts b/apps/frontend/src/__mocks__/sentry-electron-shared.ts new file mode 100644 index 0000000000..e2c97e98fe --- /dev/null +++ b/apps/frontend/src/__mocks__/sentry-electron-shared.ts @@ -0,0 +1,26 @@ +export type SentryErrorEvent = Record; + +export type SentryScope = { + setContext: (key: string, value: Record) => void; +}; + +export type SentryInitOptions = { + beforeSend?: (event: SentryErrorEvent) => SentryErrorEvent | null; + tracesSampleRate?: number; + profilesSampleRate?: number; + dsn?: string; + environment?: string; + release?: string; + debug?: boolean; + enabled?: boolean; +}; + +export function init(_options: SentryInitOptions): void {} + +export function captureException(_error: Error): void {} + +export function withScope(callback: (scope: SentryScope) => void): void { + callback({ + setContext: () => {} + }); +} diff --git a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts index 641f8e968b..432c5f361d 100644 --- a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts +++ b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts @@ -11,7 +11,8 @@ const mockIpcRenderer = { on: vi.fn(), once: vi.fn(), removeListener: vi.fn(), - removeAllListeners: vi.fn() + removeAllListeners: vi.fn(), + setMaxListeners: vi.fn() }; // Mock contextBridge diff --git a/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts b/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts index 1ef0da9ded..1d9e0540e1 100644 --- a/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts +++ b/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts @@ -30,9 +30,13 @@ const mockProcess = Object.assign(new EventEmitter(), { }) }); -vi.mock('child_process', () => ({ - spawn: vi.fn(() => mockProcess) -})); +vi.mock('child_process', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + spawn: vi.fn(() => mockProcess) + }; +}); // Mock claude-profile-manager to bypass auth checks in tests vi.mock('../../main/claude-profile-manager', () => ({ @@ -107,7 +111,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test task description'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test task description'); expect(spawn).toHaveBeenCalledWith( EXPECTED_PYTHON_COMMAND, @@ -132,7 +136,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001'); + await manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001'); expect(spawn).toHaveBeenCalledWith( EXPECTED_PYTHON_COMMAND, @@ -154,7 +158,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startQAProcess('task-1', TEST_PROJECT_PATH, 'spec-001'); + await manager.startQAProcess('task-1', TEST_PROJECT_PATH, 
'spec-001'); expect(spawn).toHaveBeenCalledWith( EXPECTED_PYTHON_COMMAND, @@ -178,7 +182,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001', { + await manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001', { parallel: true, workers: 4 }); @@ -204,7 +208,7 @@ describe('Subprocess Spawn Integration', () => { const logHandler = vi.fn(); manager.on('log', logHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate stdout data (must include newline for buffered output processing) mockStdout.emit('data', Buffer.from('Test log output\n')); @@ -220,7 +224,7 @@ describe('Subprocess Spawn Integration', () => { const logHandler = vi.fn(); manager.on('log', logHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate stderr data (must include newline for buffered output processing) mockStderr.emit('data', Buffer.from('Progress: 50%\n')); @@ -236,7 +240,7 @@ describe('Subprocess Spawn Integration', () => { const exitHandler = vi.fn(); manager.on('exit', exitHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate process exit mockProcess.emit('exit', 0); @@ -253,7 +257,7 @@ describe('Subprocess Spawn Integration', () => { const errorHandler = vi.fn(); manager.on('error', errorHandler); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); // Simulate process error mockProcess.emit('error', new Error('Spawn failed')); @@ -266,7 +270,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); expect(manager.isRunning('task-1')).toBe(true); @@ -293,10 +297,10 @@ describe('Subprocess Spawn Integration', () => { manager.configure(undefined, AUTO_CLAUDE_SOURCE); expect(manager.getRunningTasks()).toHaveLength(0); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); expect(manager.getRunningTasks()).toContain('task-1'); - manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); + await manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); expect(manager.getRunningTasks()).toHaveLength(2); }); @@ -307,7 +311,7 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure('/custom/python3', AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test'); expect(spawn).toHaveBeenCalledWith( '/custom/python3', @@ -321,8 +325,8 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); - manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); + await 
manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001'); await manager.killAll(); @@ -334,10 +338,10 @@ describe('Subprocess Spawn Integration', () => { const manager = new AgentManager(); manager.configure(undefined, AUTO_CLAUDE_SOURCE); - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1'); // Start another process for same task - manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 2'); + await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 2'); // Should have killed the first one expect(mockProcess.kill).toHaveBeenCalled(); diff --git a/apps/frontend/src/__tests__/setup.ts b/apps/frontend/src/__tests__/setup.ts index 34f7a6465f..dc2c99dd91 100644 --- a/apps/frontend/src/__tests__/setup.ts +++ b/apps/frontend/src/__tests__/setup.ts @@ -28,6 +28,14 @@ Object.defineProperty(global, 'localStorage', { value: localStorageMock }); +// Mock scrollIntoView for Radix Select in jsdom +if (typeof HTMLElement !== 'undefined' && !HTMLElement.prototype.scrollIntoView) { + Object.defineProperty(HTMLElement.prototype, 'scrollIntoView', { + value: vi.fn(), + writable: true + }); +} + // Test data directory for isolated file operations export const TEST_DATA_DIR = '/tmp/auto-claude-ui-tests'; @@ -88,7 +96,14 @@ if (typeof window !== 'undefined') { success: true, data: { openProjectIds: [], activeProjectId: null, tabOrder: [] } }), - saveTabState: vi.fn().mockResolvedValue({ success: true }) + saveTabState: vi.fn().mockResolvedValue({ success: true }), + // Profile-related API methods (API Profile feature) + getAPIProfiles: vi.fn(), + saveAPIProfile: vi.fn(), + updateAPIProfile: vi.fn(), + deleteAPIProfile: vi.fn(), + setActiveAPIProfile: vi.fn(), + testConnection: vi.fn() }; } diff --git a/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts b/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts new file mode 100644 index 0000000000..42bd919b3b --- /dev/null +++ b/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts @@ -0,0 +1,126 @@ +import path from 'path'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; + +const mockGetToolPath = vi.fn<() => string>(); +const mockGetAugmentedEnv = vi.fn<() => Record>(); + +vi.mock('../cli-tool-manager', () => ({ + getToolPath: mockGetToolPath, +})); + +vi.mock('../env-utils', () => ({ + getAugmentedEnv: mockGetAugmentedEnv, +})); + +describe('claude-cli-utils', () => { + beforeEach(() => { + mockGetToolPath.mockReset(); + mockGetAugmentedEnv.mockReset(); + vi.resetModules(); + }); + + it('prepends the CLI directory to PATH when the command is absolute', async () => { + const command = process.platform === 'win32' + ? 'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const env = { + PATH: process.platform === 'win32' + ? 'C:\\Windows\\System32' + : '/usr/bin', + HOME: '/tmp', + }; + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + const separator = process.platform === 'win32' ? ';' : ':'; + expect(result.command).toBe(command); + expect(result.env.PATH.split(separator)[0]).toBe(path.dirname(command)); + expect(result.env.HOME).toBe(env.HOME); + }); + + it('sets PATH to the command directory when PATH is empty', async () => { + const command = process.platform === 'win32' + ? 
'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const env = { PATH: '' }; + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(path.dirname(command)); + }); + + it('sets PATH to the command directory when PATH is missing', async () => { + const command = process.platform === 'win32' + ? 'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const env = {}; + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(path.dirname(command)); + }); + + it('keeps PATH unchanged when the command is not absolute', async () => { + const env = { + PATH: process.platform === 'win32' + ? 'C:\\Windows;C:\\Windows\\System32' + : '/usr/bin:/bin', + }; + mockGetToolPath.mockReturnValue('claude'); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.command).toBe('claude'); + expect(result.env.PATH).toBe(env.PATH); + }); + + it('does not duplicate the command directory in PATH', async () => { + const command = process.platform === 'win32' + ? 'C:\\Tools\\claude\\claude.exe' + : '/opt/claude/bin/claude'; + const commandDir = path.dirname(command); + const separator = process.platform === 'win32' ? ';' : ':'; + const env = { PATH: `${commandDir}${separator}/usr/bin` }; + + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(env.PATH); + }); + + it('treats PATH entries case-insensitively on Windows', async () => { + const originalPlatform = Object.getOwnPropertyDescriptor(process, 'platform'); + Object.defineProperty(process, 'platform', { value: 'win32' }); + + try { + const command = 'C:\\Tools\\claude\\claude.exe'; + const env = { PATH: 'c:\\tools\\claude;C:\\Windows' }; + + mockGetToolPath.mockReturnValue(command); + mockGetAugmentedEnv.mockReturnValue(env); + + const { getClaudeCliInvocation } = await import('../claude-cli-utils'); + const result = getClaudeCliInvocation(); + + expect(result.env.PATH).toBe(env.PATH); + } finally { + if (originalPlatform) { + Object.defineProperty(process, 'platform', originalPlatform); + } + } + }); +}); diff --git a/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts b/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts new file mode 100644 index 0000000000..b39c588a6d --- /dev/null +++ b/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts @@ -0,0 +1,469 @@ +/** + * Unit tests for cli-tool-manager + * Tests CLI tool detection with focus on NVM path detection + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { existsSync, readdirSync } from 'fs'; +import os from 'os'; +import { execFileSync } from 'child_process'; +import { app } from 'electron'; +import { + getToolInfo, + clearToolCache, + getClaudeDetectionPaths, + sortNvmVersionDirs, + buildClaudeDetectionResult +} from '../cli-tool-manager'; + +// Mock Electron app +vi.mock('electron', () => ({ + app: { + isPackaged: false, + getPath: vi.fn() + } +})); + +// Mock os 
module +vi.mock('os', () => ({ + default: { + homedir: vi.fn(() => '/mock/home') + } +})); + +// Mock fs module - need to mock both sync and promises +vi.mock('fs', () => { + const mockDirent = ( + name: string, + isDir: boolean + ): { name: string; isDirectory: () => boolean } => ({ + name, + isDirectory: () => isDir + }); + + return { + existsSync: vi.fn(), + readdirSync: vi.fn(), + promises: {} + }; +}); + +// Mock child_process for execFileSync and execFile (used in validation) +vi.mock('child_process', () => ({ + execFileSync: vi.fn(), + execFile: vi.fn() +})); + +// Mock env-utils to avoid PATH augmentation complexity +vi.mock('../env-utils', () => ({ + findExecutable: vi.fn(() => null), // Return null to force platform-specific path checking + getAugmentedEnv: vi.fn(() => ({ PATH: '' })) +})); + +// Mock homebrew-python utility +vi.mock('../utils/homebrew-python', () => ({ + findHomebrewPython: vi.fn(() => null) +})); + +describe('cli-tool-manager - Claude CLI NVM detection', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Set default platform to Linux + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true + }); + }); + + afterEach(() => { + clearToolCache(); + }); + + const mockHomeDir = '/mock/home'; + + describe('NVM path detection on Unix/Linux/macOS', () => { + it('should detect Claude CLI in NVM directory when multiple Node versions exist', () => { + // Mock home directory + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + // Mock NVM directory exists + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + // NVM versions directory exists + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + // Claude CLI exists in v22.17.0 + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + // Mock readdirSync to return Node version directories + vi.mocked(readdirSync).mockImplementation((filePath, options) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return [ + { name: 'v20.11.0', isDirectory: () => true }, + { name: 'v22.17.0', isDirectory: () => true } + ] as any; + } + return [] as any; + }); + + // Mock execFileSync to return version for validation + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.path).toContain('v22.17.0'); + expect(result.path).toContain('bin/claude'); + expect(result.source).toBe('nvm'); + }); + + it('should try multiple NVM Node versions until finding Claude CLI', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + // Only v24.12.0 has Claude CLI + if (pathStr.includes('v24.12.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return [ + { name: 'v18.20.0', isDirectory: () => true }, + { name: 'v20.11.0', isDirectory: () => true }, + { name: 'v24.12.0', isDirectory: () => true } + ] as any; + } + return [] as any; + }); + + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.path).toContain('v24.12.0'); + 
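// A minimal sketch of the scan these NVM cases exercise, assuming getToolInfo walks the
// sorted version directories and returns the first candidate that passes validation
// (only the helpers imported above are real; validateClaudeBinary and the loop are illustrative):
//
//   const { nvmVersionsDir } = getClaudeDetectionPaths(os.homedir());
//   if (existsSync(nvmVersionsDir)) {
//     const versions = sortNvmVersionDirs(readdirSync(nvmVersionsDir, { withFileTypes: true }));
//     for (const version of versions) {            // e.g. ['v24.12.0', 'v20.11.0', 'v18.20.0']
//       const candidate = path.join(nvmVersionsDir, version, 'bin', 'claude');
//       if (!existsSync(candidate)) continue;
//       const validation = validateClaudeBinary(candidate);   // hypothetical: runs `claude --version` via execFileSync
//       const result = buildClaudeDetectionResult(candidate, validation, 'nvm', 'Found via NVM');
//       if (result) return result;                 // null when validation failed, so the scan continues
//     }
//   }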
expect(result.source).toBe('nvm'); + }); + + it('should skip non-version directories in NVM (e.g., does not start with "v")', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + // Only the correctly named version has Claude + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return [ + { name: 'current', isDirectory: () => true }, // Should be skipped + { name: 'system', isDirectory: () => true }, // Should be skipped + { name: 'v22.17.0', isDirectory: () => true } // Should be checked + ] as any; + } + return [] as any; + }); + + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.path).toContain('v22.17.0'); + }); + + it('should not check NVM paths on Windows', () => { + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true + }); + + vi.mocked(os.homedir).mockReturnValue('C:\\Users\\test'); + + // Even if NVM directory exists on Windows, should not check it + vi.mocked(existsSync).mockReturnValue(false); + vi.mocked(readdirSync).mockReturnValue([]); + + const result = getToolInfo('claude'); + + // Should not be found from NVM on Windows + expect(result.source).not.toBe('nvm'); + }); + + it('should handle missing NVM directory gracefully', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + // NVM directory does not exist + vi.mocked(existsSync).mockReturnValue(false); + + const result = getToolInfo('claude'); + + // Should not find via NVM + expect(result.source).not.toBe('nvm'); + expect(result.found).toBe(false); + }); + + it('should handle readdirSync errors gracefully', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockReturnValue(true); + vi.mocked(readdirSync).mockImplementation(() => { + throw new Error('Permission denied'); + }); + + const result = getToolInfo('claude'); + + // Should not crash, should fall back to other detection methods + expect(result.source).not.toBe('nvm'); + }); + + it('should validate Claude CLI before returning NVM path', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation(() => { + return [{ name: 'v22.17.0', isDirectory: () => true }] as any; + }); + + // Mock validation failure (execFileSync throws) + vi.mocked(execFileSync).mockImplementation(() => { + throw new Error('Command failed'); + }); + + const result = getToolInfo('claude'); + + // Should not return unvalidated path + expect(result.found).toBe(false); + expect(result.source).not.toBe('nvm'); + }); + + it('should handle NVM directory with no version subdirectories', () => { + vi.mocked(os.homedir).mockReturnValue(mockHomeDir); + + vi.mocked(existsSync).mockImplementation((filePath) => { + return String(filePath).includes('.nvm/versions/node'); + }); + + // Empty NVM directory + vi.mocked(readdirSync).mockReturnValue([]); + + const result = 
getToolInfo('claude'); + + expect(result.source).not.toBe('nvm'); + }); + }); + + describe('NVM on macOS', () => { + it('should detect Claude CLI via NVM on macOS', () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + writable: true + }); + + vi.mocked(os.homedir).mockReturnValue('/Users/test'); + + vi.mocked(existsSync).mockImplementation((filePath) => { + const pathStr = String(filePath); + if (pathStr.includes('.nvm/versions/node')) { + return true; + } + if (pathStr.includes('v22.17.0/bin/claude')) { + return true; + } + return false; + }); + + vi.mocked(readdirSync).mockImplementation(() => { + return [{ name: 'v22.17.0', isDirectory: () => true }] as any; + }); + + vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n'); + + const result = getToolInfo('claude'); + + expect(result.found).toBe(true); + expect(result.source).toBe('nvm'); + expect(result.path).toContain('v22.17.0'); + }); + }); +}); + +/** + * Unit tests for helper functions + */ +describe('cli-tool-manager - Helper Functions', () => { + describe('getClaudeDetectionPaths', () => { + it('should return homebrew paths on macOS', () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + writable: true + }); + + const paths = getClaudeDetectionPaths('/Users/test'); + + expect(paths.homebrewPaths).toContain('/opt/homebrew/bin/claude'); + expect(paths.homebrewPaths).toContain('/usr/local/bin/claude'); + }); + + it('should return Windows paths on win32', () => { + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true + }); + + const paths = getClaudeDetectionPaths('C:\\Users\\test'); + + // Windows paths should include AppData and Program Files + expect(paths.platformPaths.some(p => p.includes('AppData'))).toBe(true); + expect(paths.platformPaths.some(p => p.includes('Program Files'))).toBe(true); + }); + + it('should return Unix paths on Linux', () => { + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true + }); + + const paths = getClaudeDetectionPaths('/home/test'); + + expect(paths.platformPaths.some(p => p.includes('.local/bin/claude'))).toBe(true); + expect(paths.platformPaths.some(p => p.includes('bin/claude'))).toBe(true); + }); + + it('should return correct NVM versions directory', () => { + const paths = getClaudeDetectionPaths('/home/test'); + + expect(paths.nvmVersionsDir).toBe('/home/test/.nvm/versions/node'); + }); + }); + + describe('sortNvmVersionDirs', () => { + it('should sort versions in descending order (newest first)', () => { + const entries = [ + { name: 'v18.20.0', isDirectory: () => true }, + { name: 'v22.17.0', isDirectory: () => true }, + { name: 'v20.11.0', isDirectory: () => true } + ]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual(['v22.17.0', 'v20.11.0', 'v18.20.0']); + }); + + it('should filter out non-version directories', () => { + const entries = [ + { name: 'v20.11.0', isDirectory: () => true }, + { name: '.DS_Store', isDirectory: () => false }, + { name: 'node_modules', isDirectory: () => true }, + { name: 'current', isDirectory: () => true }, + { name: 'v22.17.0', isDirectory: () => true } + ]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual(['v22.17.0', 'v20.11.0']); + expect(sorted).not.toContain('.DS_Store'); + expect(sorted).not.toContain('node_modules'); + expect(sorted).not.toContain('current'); + }); + + it('should return empty array when no valid versions', () => { + const entries = [ + { name: 'current', isDirectory: () 
=> true }, + { name: 'system', isDirectory: () => true } + ]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual([]); + }); + + it('should handle single entry', () => { + const entries = [{ name: 'v20.11.0', isDirectory: () => true }]; + + const sorted = sortNvmVersionDirs(entries); + + expect(sorted).toEqual(['v20.11.0']); + }); + + it('should handle empty array', () => { + const sorted = sortNvmVersionDirs([]); + + expect(sorted).toEqual([]); + }); + }); + + describe('buildClaudeDetectionResult', () => { + it('should return null when validation fails', () => { + const result = buildClaudeDetectionResult( + '/path/to/claude', + { valid: false, message: 'Invalid CLI' }, + 'nvm', + 'Found via NVM' + ); + + expect(result).toBeNull(); + }); + + it('should return proper result when validation succeeds', () => { + const result = buildClaudeDetectionResult( + '/path/to/claude', + { valid: true, version: '1.0.0', message: 'Valid' }, + 'nvm', + 'Found via NVM' + ); + + expect(result).not.toBeNull(); + expect(result?.found).toBe(true); + expect(result?.path).toBe('/path/to/claude'); + expect(result?.version).toBe('1.0.0'); + expect(result?.source).toBe('nvm'); + expect(result?.message).toContain('Found via NVM'); + expect(result?.message).toContain('/path/to/claude'); + }); + + it('should include path in message', () => { + const result = buildClaudeDetectionResult( + '/home/user/.nvm/versions/node/v22.17.0/bin/claude', + { valid: true, version: '2.0.0', message: 'OK' }, + 'nvm', + 'Detected Claude CLI' + ); + + expect(result?.message).toContain('Detected Claude CLI'); + expect(result?.message).toContain('/home/user/.nvm/versions/node/v22.17.0/bin/claude'); + }); + }); +}); diff --git a/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts b/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts new file mode 100644 index 0000000000..bbcbdc354a --- /dev/null +++ b/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts @@ -0,0 +1,232 @@ +import { EventEmitter } from 'events'; +import path from 'path'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; +import { IPC_CHANNELS } from '../../shared/constants'; +const { + mockGetClaudeCliInvocation, + mockGetClaudeCliInvocationAsync, + mockGetProject, + spawnMock, + mockIpcMain, +} = vi.hoisted(() => { + const ipcMain = new (class { + handlers = new Map(); + + handle(channel: string, handler: Function): void { + this.handlers.set(channel, handler); + } + + getHandler(channel: string): Function | undefined { + return this.handlers.get(channel); + } + })(); + + return { + mockGetClaudeCliInvocation: vi.fn(), + mockGetClaudeCliInvocationAsync: vi.fn(), + mockGetProject: vi.fn(), + spawnMock: vi.fn(), + mockIpcMain: ipcMain, + }; +}); + +vi.mock('../claude-cli-utils', () => ({ + getClaudeCliInvocation: mockGetClaudeCliInvocation, + getClaudeCliInvocationAsync: mockGetClaudeCliInvocationAsync, +})); + +vi.mock('../project-store', () => ({ + projectStore: { + getProject: mockGetProject, + }, +})); + +vi.mock('child_process', () => ({ + spawn: spawnMock, +})); + +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return path.join('/tmp', 'userData'); + return '/tmp'; + }), + }, + ipcMain: mockIpcMain, +})); + +import { registerEnvHandlers } from '../ipc-handlers/env-handlers'; + +function createProc(): EventEmitter & { stdout?: EventEmitter; stderr?: EventEmitter } { + const proc = new EventEmitter() as EventEmitter & { + stdout?: EventEmitter; 
+ stderr?: EventEmitter; + }; + proc.stdout = new EventEmitter(); + proc.stderr = new EventEmitter(); + return proc; +} + +// Helper to flush all pending promises (needed for async mock resolution) +function flushPromises(): Promise { + return new Promise(resolve => setTimeout(resolve, 0)); +} + +describe('env-handlers Claude CLI usage', () => { + beforeEach(() => { + mockGetClaudeCliInvocation.mockReset(); + mockGetClaudeCliInvocationAsync.mockReset(); + mockGetProject.mockReset(); + spawnMock.mockReset(); + }); + + it('uses resolved Claude CLI path/env for auth checks', async () => { + const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' }; + const command = '/opt/claude/bin/claude'; + mockGetClaudeCliInvocationAsync.mockResolvedValue({ + command, + env: claudeEnv, + }); + mockGetProject.mockReturnValue({ id: 'p1', path: '/tmp/project' }); + + const procs: ReturnType[] = []; + spawnMock.mockImplementation(() => { + const proc = createProc(); + procs.push(proc); + return proc; + }); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const resultPromise = handler({}, 'p1'); + // Wait for async CLI resolution before checking spawn + await flushPromises(); + expect(spawnMock).toHaveBeenCalledTimes(1); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['--version'], + expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false }) + ); + + procs[0].emit('close', 0); + await Promise.resolve(); + + expect(spawnMock).toHaveBeenCalledTimes(2); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['api', '--help'], + expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false }) + ); + + procs[1].emit('close', 0); + + const result = await resultPromise; + expect(result).toEqual({ success: true, data: { success: true, authenticated: true } }); + }); + + it('uses resolved Claude CLI path/env for setup-token', async () => { + const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' }; + const command = '/opt/claude/bin/claude'; + mockGetClaudeCliInvocationAsync.mockResolvedValue({ + command, + env: claudeEnv, + }); + mockGetProject.mockReturnValue({ id: 'p2', path: '/tmp/project' }); + + const proc = createProc(); + spawnMock.mockReturnValue(proc); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_INVOKE_CLAUDE_SETUP); + if (!handler) { + throw new Error('ENV_INVOKE_CLAUDE_SETUP handler not registered'); + } + + const resultPromise = handler({}, 'p2'); + // Wait for async CLI resolution before checking spawn + await flushPromises(); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['setup-token'], + expect.objectContaining({ + cwd: '/tmp/project', + env: claudeEnv, + shell: false, + stdio: 'inherit' + }) + ); + + proc.emit('close', 0); + const result = await resultPromise; + expect(result).toEqual({ success: true, data: { success: true, authenticated: true } }); + }); + + it('returns an error when Claude CLI resolution throws', async () => { + mockGetClaudeCliInvocationAsync.mockRejectedValue(new Error('Claude CLI exploded')); + mockGetProject.mockReturnValue({ id: 'p3', path: '/tmp/project' }); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const result = await handler({}, 'p3'); + 
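// The auth-check flow these handler tests pin down, roughly (runAndWait is a hypothetical
// helper; the argument lists and error strings come from the assertions in this file):
//
//   const { command, env } = await getClaudeCliInvocationAsync();
//   if (!command) return { success: false, error: 'Claude CLI path not resolved' };
//   const cwd = project.path;
//   if (await runAndWait(command, ['--version'], { cwd, env, shell: false }) !== 0) {
//     return { success: false, error: 'Claude CLI not found' };
//   }
//   const apiCode = await runAndWait(command, ['api', '--help'], { cwd, env, shell: false });
//   return { success: true, data: { success: true, authenticated: apiCode === 0 } };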
expect(result.success).toBe(false); + expect(result.error).toContain('Claude CLI exploded'); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it('returns an error when Claude CLI command is missing', async () => { + mockGetClaudeCliInvocationAsync.mockResolvedValue({ command: '', env: {} }); + mockGetProject.mockReturnValue({ id: 'p4', path: '/tmp/project' }); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const result = await handler({}, 'p4'); + expect(result.success).toBe(false); + expect(result.error).toContain('Claude CLI path not resolved'); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it('returns an error when Claude CLI exits with a non-zero code', async () => { + const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' }; + const command = '/opt/claude/bin/claude'; + mockGetClaudeCliInvocationAsync.mockResolvedValue({ + command, + env: claudeEnv, + }); + mockGetProject.mockReturnValue({ id: 'p5', path: '/tmp/project' }); + + const proc = createProc(); + spawnMock.mockReturnValue(proc); + + registerEnvHandlers(() => null); + const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH); + if (!handler) { + throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered'); + } + + const resultPromise = handler({}, 'p5'); + // Wait for async CLI resolution before checking spawn + await flushPromises(); + expect(spawnMock).toHaveBeenCalledWith( + command, + ['--version'], + expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false }) + ); + proc.emit('close', 1); + + const result = await resultPromise; + expect(result.success).toBe(false); + expect(result.error).toContain('Claude CLI not found'); + }); +}); diff --git a/apps/frontend/src/main/__tests__/insights-config.test.ts b/apps/frontend/src/main/__tests__/insights-config.test.ts new file mode 100644 index 0000000000..5775d65ab0 --- /dev/null +++ b/apps/frontend/src/main/__tests__/insights-config.test.ts @@ -0,0 +1,99 @@ +/** + * @vitest-environment node + */ +import path from 'path'; +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { InsightsConfig } from '../insights/config'; + +vi.mock('electron', () => ({ + app: { + getAppPath: () => '/app', + getPath: () => '/tmp', + isPackaged: false + } +})); + +vi.mock('../rate-limit-detector', () => ({ + getProfileEnv: () => ({ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token' }) +})); + +const mockGetApiProfileEnv = vi.fn(); +vi.mock('../services/profile', () => ({ + getAPIProfileEnv: (...args: unknown[]) => mockGetApiProfileEnv(...args) +})); + +const mockGetPythonEnv = vi.fn(); +vi.mock('../python-env-manager', () => ({ + pythonEnvManager: { + getPythonEnv: () => mockGetPythonEnv() + } +})); + +describe('InsightsConfig', () => { + const originalEnv = { ...process.env }; + + beforeEach(() => { + process.env = { ...originalEnv, TEST_ENV: 'ok' }; + mockGetApiProfileEnv.mockResolvedValue({ + ANTHROPIC_BASE_URL: 'https://api.z.ai', + ANTHROPIC_AUTH_TOKEN: 'key' + }); + mockGetPythonEnv.mockReturnValue({ PYTHONPATH: '/site-packages' }); + }); + + afterEach(() => { + process.env = { ...originalEnv }; + vi.clearAllMocks(); + vi.restoreAllMocks(); + }); + + it('should build process env with python and profile settings', async () => { + const config = new InsightsConfig(); + vi.spyOn(config, 'loadAutoBuildEnv').mockReturnValue({ CUSTOM_ENV: '1' }); + vi.spyOn(config, 
'getAutoBuildSourcePath').mockReturnValue('/backend'); + + const env = await config.getProcessEnv(); + + expect(env.TEST_ENV).toBe('ok'); + expect(env.CUSTOM_ENV).toBe('1'); + expect(env.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token'); + expect(env.ANTHROPIC_BASE_URL).toBe('https://api.z.ai'); + expect(env.ANTHROPIC_AUTH_TOKEN).toBe('key'); + expect(env.PYTHONPATH).toBe(['/site-packages', '/backend'].join(path.delimiter)); + }); + + it('should clear ANTHROPIC env vars in OAuth mode when no API profile is set', async () => { + const config = new InsightsConfig(); + mockGetApiProfileEnv.mockResolvedValue({}); + process.env = { + ...originalEnv, + ANTHROPIC_AUTH_TOKEN: 'stale-token', + ANTHROPIC_BASE_URL: 'https://stale.example' + }; + + const env = await config.getProcessEnv(); + + expect(env.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(env.ANTHROPIC_BASE_URL).toBe(''); + }); + + it('should set PYTHONPATH only to auto-build path when python env has none', async () => { + const config = new InsightsConfig(); + mockGetPythonEnv.mockReturnValue({}); + vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue('/backend'); + + const env = await config.getProcessEnv(); + + expect(env.PYTHONPATH).toBe('/backend'); + }); + + it('should keep PYTHONPATH from python env when auto-build path is missing', async () => { + const config = new InsightsConfig(); + mockGetPythonEnv.mockReturnValue({ PYTHONPATH: '/site-packages' }); + vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue(null); + + const env = await config.getProcessEnv(); + + expect(env.PYTHONPATH).toBe('/site-packages'); + }); +}); diff --git a/apps/frontend/src/main/__tests__/ipc-handlers.test.ts b/apps/frontend/src/main/__tests__/ipc-handlers.test.ts index 86699e5c7c..c969ca335a 100644 --- a/apps/frontend/src/main/__tests__/ipc-handlers.test.ts +++ b/apps/frontend/src/main/__tests__/ipc-handlers.test.ts @@ -139,7 +139,8 @@ function cleanupTestDirs(): void { } } -describe('IPC Handlers', () => { +// Increase timeout for all tests in this file due to dynamic imports and setup overhead +describe('IPC Handlers', { timeout: 15000 }, () => { let ipcMain: EventEmitter & { handlers: Map; invokeHandler: (channel: string, event: unknown, ...args: unknown[]) => Promise; @@ -519,7 +520,8 @@ describe('IPC Handlers', () => { expect(mockMainWindow.webContents.send).toHaveBeenCalledWith( 'task:log', 'task-1', - 'Test log message' + 'Test log message', + undefined // projectId is undefined when task not found ); }); @@ -532,7 +534,8 @@ describe('IPC Handlers', () => { expect(mockMainWindow.webContents.send).toHaveBeenCalledWith( 'task:error', 'task-1', - 'Test error message' + 'Test error message', + undefined // projectId is undefined when task not found ); }); @@ -556,7 +559,8 @@ describe('IPC Handlers', () => { expect(mockMainWindow.webContents.send).toHaveBeenCalledWith( 'task:statusChange', 'task-1', - 'human_review' + 'human_review', + expect.any(String) // projectId for multi-project filtering ); }); }); diff --git a/apps/frontend/src/main/agent/agent-manager.ts b/apps/frontend/src/main/agent/agent-manager.ts index a0d65d1fae..962259e3e5 100644 --- a/apps/frontend/src/main/agent/agent-manager.ts +++ b/apps/frontend/src/main/agent/agent-manager.ts @@ -87,14 +87,14 @@ export class AgentManager extends EventEmitter { /** * Start spec creation process */ - startSpecCreation( + async startSpecCreation( taskId: string, projectPath: string, taskDescription: string, specDir?: string, metadata?: SpecCreationMetadata, baseBranch?: string - ): void { + ): 
Promise { // Pre-flight auth check: Verify active profile has valid authentication const profileManager = getClaudeProfileManager(); if (!profileManager.hasValidAuth()) { @@ -152,22 +152,27 @@ export class AgentManager extends EventEmitter { } } + // Workspace mode: --direct skips worktree isolation (default is isolated for safety) + if (metadata?.useWorktree === false) { + args.push('--direct'); + } + // Store context for potential restart this.storeTaskContext(taskId, projectPath, '', {}, true, taskDescription, specDir, metadata, baseBranch); // Note: This is spec-creation but it chains to task-execution via run.py - this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); + await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); } /** * Start task execution (run.py) */ - startTaskExecution( + async startTaskExecution( taskId: string, projectPath: string, specId: string, options: TaskExecutionOptions = {} - ): void { + ): Promise { // Pre-flight auth check: Verify active profile has valid authentication const profileManager = getClaudeProfileManager(); if (!profileManager.hasValidAuth()) { @@ -200,6 +205,11 @@ export class AgentManager extends EventEmitter { // Force: When user starts a task from the UI, that IS their approval args.push('--force'); + // Workspace mode: --direct skips worktree isolation (default is isolated for safety) + if (options.useWorktree === false) { + args.push('--direct'); + } + // Pass base branch if specified (ensures worktrees are created from the correct branch) if (options.baseBranch) { args.push('--base-branch', options.baseBranch); @@ -213,17 +223,17 @@ export class AgentManager extends EventEmitter { // Store context for potential restart this.storeTaskContext(taskId, projectPath, specId, options, false); - this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); + await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution'); } /** * Start QA process */ - startQAProcess( + async startQAProcess( taskId: string, projectPath: string, specId: string - ): void { + ): Promise { const autoBuildSource = this.processManager.getAutoBuildSourcePath(); if (!autoBuildSource) { @@ -243,7 +253,7 @@ export class AgentManager extends EventEmitter { const args = [runPath, '--spec', specId, '--project-dir', projectPath, '--qa']; - this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'qa-process'); + await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'qa-process'); } /** diff --git a/apps/frontend/src/main/agent/agent-process.test.ts b/apps/frontend/src/main/agent/agent-process.test.ts new file mode 100644 index 0000000000..db992bb598 --- /dev/null +++ b/apps/frontend/src/main/agent/agent-process.test.ts @@ -0,0 +1,494 @@ +/** + * Integration tests for AgentProcessManager + * Tests API profile environment variable injection into spawnProcess + * + * Story 2.3: Env Var Injection - AC1, AC2, AC3, AC4 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { EventEmitter } from 'events'; + +// Create a mock process object that will be returned by spawn +function createMockProcess() { + return { + stdout: { on: vi.fn() }, + stderr: { on: vi.fn() }, + on: vi.fn((event: string, callback: any) => { + if (event === 'exit') { + // Simulate immediate exit with code 0 + setTimeout(() => callback(0), 10); + } + }), + kill: vi.fn() + }; 
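// With startSpecCreation/startTaskExecution now returning promises, callers are expected to
// await them; a typical call from the UI layer looks roughly like this (option names as
// introduced in this diff, values illustrative):
//
//   await manager.startTaskExecution('task-1', projectPath, 'spec-001', {
//     parallel: true,
//     workers: 4,
//     useWorktree: false   // opts out of worktree isolation -> adds --direct to the run.py args
//   });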
+} + +// Mock child_process - must be BEFORE imports of modules that use it +const spawnCalls: Array<{ command: string; args: string[]; options: { env: Record; cwd?: string; [key: string]: unknown } }> = []; + +vi.mock('child_process', async (importOriginal) => { + const actual = await importOriginal(); + const mockSpawn = vi.fn((command: string, args: string[], options: { env: Record; cwd?: string; [key: string]: unknown }) => { + // Record the call for test assertions + spawnCalls.push({ command, args, options }); + return createMockProcess(); + }); + + return { + ...actual, + spawn: mockSpawn, + execSync: vi.fn((command: string) => { + if (command.includes('git')) { + return '/fake/path'; + } + return ''; + }) + }; +}); + +// Mock project-initializer to avoid child_process.execSync issues +vi.mock('../project-initializer', () => ({ + getAutoBuildPath: vi.fn(() => '/fake/auto-build'), + isInitialized: vi.fn(() => true), + initializeProject: vi.fn(), + getProjectStorePath: vi.fn(() => '/fake/store/path') +})); + +// Mock project-store BEFORE agent-process imports it +vi.mock('../project-store', () => ({ + projectStore: { + getProject: vi.fn(), + listProjects: vi.fn(), + createProject: vi.fn(), + updateProject: vi.fn(), + deleteProject: vi.fn(), + getProjectSettings: vi.fn(), + updateProjectSettings: vi.fn() + } +})); + +// Mock claude-profile-manager +vi.mock('../claude-profile-manager', () => ({ + getClaudeProfileManager: vi.fn(() => ({ + getProfilePath: vi.fn(() => '/fake/profile/path'), + ensureProfileDir: vi.fn(), + readProfile: vi.fn(), + writeProfile: vi.fn(), + deleteProfile: vi.fn() + })) +})); + +// Mock dependencies +vi.mock('../services/profile', () => ({ + getAPIProfileEnv: vi.fn() +})); + +vi.mock('../rate-limit-detector', () => ({ + getProfileEnv: vi.fn(() => ({})), + detectRateLimit: vi.fn(() => ({ isRateLimited: false })), + createSDKRateLimitInfo: vi.fn(), + detectAuthFailure: vi.fn(() => ({ isAuthFailure: false })) +})); + +vi.mock('../python-detector', () => ({ + findPythonCommand: vi.fn(() => 'python'), + parsePythonCommand: vi.fn(() => ['python', []]) +})); + +vi.mock('electron', () => ({ + app: { + getAppPath: vi.fn(() => '/fake/app/path') + } +})); + +// Import AFTER all mocks are set up +import { AgentProcessManager } from './agent-process'; +import { AgentState } from './agent-state'; +import { AgentEvents } from './agent-events'; +import * as profileService from '../services/profile'; +import * as rateLimitDetector from '../rate-limit-detector'; + +describe('AgentProcessManager - API Profile Env Injection (Story 2.3)', () => { + let processManager: AgentProcessManager; + let state: AgentState; + let events: AgentEvents; + let emitter: EventEmitter; + + beforeEach(() => { + // Reset all mocks and spawn calls + vi.clearAllMocks(); + spawnCalls.length = 0; + + // Clear environment variables that could interfere with tests + delete process.env.ANTHROPIC_AUTH_TOKEN; + delete process.env.ANTHROPIC_BASE_URL; + delete process.env.CLAUDE_CODE_OAUTH_TOKEN; + + // Initialize components + state = new AgentState(); + events = new AgentEvents(); + emitter = new EventEmitter(); + processManager = new AgentProcessManager(state, events, emitter); + }); + + afterEach(() => { + processManager.killAllProcesses(); + }); + + describe('AC1: API Profile Env Var Injection', () => { + it('should inject ANTHROPIC_BASE_URL when active profile has baseUrl', async () => { + const mockApiProfileEnv = { + ANTHROPIC_BASE_URL: 'https://custom.api.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key' + }; + 
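// getAPIProfileEnv() itself is not part of this diff; judging from the test names and the
// values mocked in this file, it presumably flattens the active API profile into ANTHROPIC_*
// variables and resolves to an empty object when no profile is active (OAuth mode).
// A hedged sketch (getActiveAPIProfile and the profile field names are assumptions):
//
//   const profile = await getActiveAPIProfile();
//   if (!profile) return {};                 // OAuth mode -> empty object
//   return {
//     ...(profile.baseUrl ? { ANTHROPIC_BASE_URL: profile.baseUrl } : {}),
//     ...(profile.apiKey ? { ANTHROPIC_AUTH_TOKEN: profile.apiKey } : {})
//     // plus ANTHROPIC_MODEL / ANTHROPIC_DEFAULT_*_MODEL entries when the profile configures models
//   };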
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0].command).toBe('python'); + expect(spawnCalls[0].args).toContain('run.py'); + expect(spawnCalls[0].options.env).toMatchObject({ + ANTHROPIC_BASE_URL: 'https://custom.api.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key' + }); + }); + + it('should inject ANTHROPIC_AUTH_TOKEN when active profile has apiKey', async () => { + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-custom-key-12345678' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0].options.env.ANTHROPIC_AUTH_TOKEN).toBe('sk-custom-key-12345678'); + }); + + it('should inject model env vars when active profile has models configured', async () => { + const mockApiProfileEnv = { + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0].options.env).toMatchObject({ + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }); + }); + + it('should give API profile env vars highest precedence over extraEnv', async () => { + const extraEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-extra-token', + ANTHROPIC_BASE_URL: 'https://extra.com' + }; + + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-token', + ANTHROPIC_BASE_URL: 'https://profile.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], extraEnv, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + // API profile should override extraEnv + expect(spawnCalls[0].options.env.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-token'); + expect(spawnCalls[0].options.env.ANTHROPIC_BASE_URL).toBe('https://profile.com'); + }); + }); + + describe('AC2: OAuth Mode (No Active Profile)', () => { + let originalEnv: NodeJS.ProcessEnv; + + beforeEach(() => { + // Save original environment before each test + originalEnv = { ...process.env }; + }); + + afterEach(() => { + // Restore original environment after each test + process.env = originalEnv; + }); + + it('should NOT set ANTHROPIC_AUTH_TOKEN when no active profile (OAuth mode)', async () => { + // Return empty object = OAuth mode + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + // Set OAuth token via getProfileEnv (existing flow) + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-123' + }); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + expect(spawnCalls).toHaveLength(1); + const envArg = spawnCalls[0].options.env as Record; + 
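// getOAuthModeClearVars (imported from ./env-utils in agent-process.ts further below) is not
// shown in this diff; the integration tests at the end of this file pin its effect: when
// apiProfileEnv is empty it must yield empty-string overrides so stale ANTHROPIC_* values
// cannot leak from process.env into the subprocess. A minimal sketch consistent with those
// assertions:
//
//   const OAUTH_CLEARED_VARS = [
//     'ANTHROPIC_AUTH_TOKEN', 'ANTHROPIC_BASE_URL', 'ANTHROPIC_MODEL',
//     'ANTHROPIC_DEFAULT_HAIKU_MODEL', 'ANTHROPIC_DEFAULT_SONNET_MODEL', 'ANTHROPIC_DEFAULT_OPUS_MODEL'
//   ];
//   function getOAuthModeClearVars(apiProfileEnv: Record<string, string>): Record<string, string> {
//     if (Object.keys(apiProfileEnv).length > 0) return {};   // an active API profile wins
//     return Object.fromEntries(OAUTH_CLEARED_VARS.map((k) => [k, '']));
//   }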
expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-123'); + // OAuth mode clears ANTHROPIC_AUTH_TOKEN with empty string (not undefined) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + }); + + it('should return empty object from getAPIProfileEnv when activeProfileId is null', async () => { + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + const result = await profileService.getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should clear stale ANTHROPIC_AUTH_TOKEN from process.env when switching to OAuth mode', async () => { + // Simulate process.env having stale ANTHROPIC_* vars from previous session + process.env = { + ...originalEnv, + ANTHROPIC_AUTH_TOKEN: 'stale-token-from-env', + ANTHROPIC_BASE_URL: 'https://stale.example.com' + }; + + // OAuth mode - no active API profile + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + // Set OAuth token + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-456' + }); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // OAuth token should be present + expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-456'); + + // Stale ANTHROPIC_* vars should be cleared (empty string overrides process.env) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + }); + + it('should clear stale ANTHROPIC_BASE_URL when switching to OAuth mode', async () => { + process.env = { + ...originalEnv, + ANTHROPIC_BASE_URL: 'https://old-custom-endpoint.com' + }; + + // OAuth mode + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-789' + }); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Should clear the base URL (so Python uses default api.anthropic.com) + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-789'); + }); + + it('should NOT clear ANTHROPIC_* vars when API Profile is active', async () => { + process.env = { + ...originalEnv, + ANTHROPIC_AUTH_TOKEN: 'old-token-in-env' + }; + + // API Profile mode - active profile + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-active', + ANTHROPIC_BASE_URL: 'https://active-profile.com' + }; + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Should use API profile vars, NOT clear them + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-active'); + expect(envArg.ANTHROPIC_BASE_URL).toBe('https://active-profile.com'); + }); + }); + + describe('AC4: No API Key Logging', () => { + it('should never log full API keys in spawn env vars', async () => { + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-sensitive-api-key-12345678', + ANTHROPIC_BASE_URL: 'https://api.example.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + // Mock ALL console methods to capture any debug/error output + const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => 
{}); + const consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}); + const consoleDebugSpy = vi.spyOn(console, 'debug').mockImplementation(() => {}); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + // Get the env object passed to spawn + const envArg = spawnCalls[0].options.env as Record; + + // Verify the full API key is in the env (for Python subprocess) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-sensitive-api-key-12345678'); + + // Collect ALL console output from all methods + const allLogCalls = [ + ...consoleLogSpy.mock.calls, + ...consoleErrorSpy.mock.calls, + ...consoleWarnSpy.mock.calls, + ...consoleDebugSpy.mock.calls + ].flatMap(call => call.map(String)); + const logString = JSON.stringify(allLogCalls); + + // The full API key should NOT appear in any logs (AC4 compliance) + expect(logString).not.toContain('sk-sensitive-api-key-12345678'); + + // Restore all spies + consoleLogSpy.mockRestore(); + consoleErrorSpy.mockRestore(); + consoleWarnSpy.mockRestore(); + consoleDebugSpy.mockRestore(); + }); + + it('should not log API key even in error scenarios', async () => { + const mockApiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-secret-key-for-error-test', + ANTHROPIC_BASE_URL: 'https://api.example.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv); + + // Mock console methods + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + // Collect all error and log output + const allOutput = [ + ...consoleErrorSpy.mock.calls, + ...consoleLogSpy.mock.calls + ].flatMap(call => call.map(arg => typeof arg === 'object' ? 
JSON.stringify(arg) : String(arg))); + const outputString = allOutput.join(' '); + + // Verify API key is never exposed in logs + expect(outputString).not.toContain('sk-secret-key-for-error-test'); + + consoleErrorSpy.mockRestore(); + consoleLogSpy.mockRestore(); + }); + }); + + describe('AC3: Profile Switching Between Builds', () => { + it('should allow different profiles for different spawn calls', async () => { + // First spawn with Profile A + const profileAEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-a', + ANTHROPIC_BASE_URL: 'https://api-a.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValueOnce(profileAEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const firstEnv = spawnCalls[0].options.env as Record; + expect(firstEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-a'); + + // Second spawn with Profile B (user switched active profile) + const profileBEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-profile-b', + ANTHROPIC_BASE_URL: 'https://api-b.com' + }; + + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValueOnce(profileBEnv); + + await processManager.spawnProcess('task-2', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const secondEnv = spawnCalls[1].options.env as Record; + expect(secondEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-b'); + + // Verify first spawn's env is NOT affected by second spawn + expect(firstEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-a'); + }); + }); + + describe('Integration: Combined env precedence', () => { + it('should merge env vars in correct precedence order', async () => { + const extraEnv = { + CUSTOM_VAR: 'from-extra' + }; + + const profileEnv = { + CLAUDE_CONFIG_DIR: '/custom/config' + }; + + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-api-profile', + ANTHROPIC_BASE_URL: 'https://api-profile.com' + }; + + vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue(profileEnv); + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(apiProfileEnv); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], extraEnv, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Verify all sources are included + expect(envArg.CUSTOM_VAR).toBe('from-extra'); // From extraEnv + expect(envArg.CLAUDE_CONFIG_DIR).toBe('/custom/config'); // From profileEnv + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-api-profile'); // From apiProfileEnv (highest for ANTHROPIC_*) + + // Verify standard Python env vars + expect(envArg.PYTHONUNBUFFERED).toBe('1'); + expect(envArg.PYTHONIOENCODING).toBe('utf-8'); + expect(envArg.PYTHONUTF8).toBe('1'); + }); + + it('should call getOAuthModeClearVars and apply clearing when in OAuth mode', async () => { + // OAuth mode - empty API profile + vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({}); + + await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution'); + + const envArg = spawnCalls[0].options.env as Record; + + // Verify clearing vars are applied (empty strings for ANTHROPIC_* vars) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + expect(envArg.ANTHROPIC_MODEL).toBe(''); + expect(envArg.ANTHROPIC_DEFAULT_HAIKU_MODEL).toBe(''); + expect(envArg.ANTHROPIC_DEFAULT_SONNET_MODEL).toBe(''); + expect(envArg.ANTHROPIC_DEFAULT_OPUS_MODEL).toBe(''); + }); + + it('should handle getAPIProfileEnv errors gracefully', async () => { + // Simulate service error + vi.mocked(profileService.getAPIProfileEnv).mockRejectedValue(new 
Error('Service unavailable')); + + // Should not throw - should fall back to OAuth mode + await expect( + processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution') + ).resolves.not.toThrow(); + + const envArg = spawnCalls[0].options.env as Record; + + // Should have clearing vars (falls back to OAuth mode on error) + expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(envArg.ANTHROPIC_BASE_URL).toBe(''); + }); + }); +}); diff --git a/apps/frontend/src/main/agent/agent-process.ts b/apps/frontend/src/main/agent/agent-process.ts index ef045555c0..03010bf959 100644 --- a/apps/frontend/src/main/agent/agent-process.ts +++ b/apps/frontend/src/main/agent/agent-process.ts @@ -7,6 +7,7 @@ import { AgentState } from './agent-state'; import { AgentEvents } from './agent-events'; import { ProcessType, ExecutionProgressData } from './types'; import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv, detectAuthFailure } from '../rate-limit-detector'; +import { getAPIProfileEnv } from '../services/profile'; import { projectStore } from '../project-store'; import { getClaudeProfileManager } from '../claude-profile-manager'; import { parsePythonCommand, validatePythonPath } from '../python-detector'; @@ -14,6 +15,64 @@ import { pythonEnvManager, getConfiguredPythonPath } from '../python-env-manager import { buildMemoryEnvVars } from '../memory-env-builder'; import { readSettingsFile } from '../settings-utils'; import type { AppSettings } from '../../shared/types/settings'; +import { getOAuthModeClearVars } from './env-utils'; +import { getAugmentedEnv } from '../env-utils'; +import { getToolInfo } from '../cli-tool-manager'; + + +function deriveGitBashPath(gitExePath: string): string | null { + if (process.platform !== 'win32') { + return null; + } + + try { + const gitDir = path.dirname(gitExePath); // e.g., D:\...\Git\mingw64\bin + const gitDirName = path.basename(gitDir).toLowerCase(); + + // Find Git installation root + let gitRoot: string; + + if (gitDirName === 'cmd') { + // .../Git/cmd/git.exe -> .../Git + gitRoot = path.dirname(gitDir); + } else if (gitDirName === 'bin') { + // Could be .../Git/bin/git.exe OR .../Git/mingw64/bin/git.exe + const parent = path.dirname(gitDir); + const parentName = path.basename(parent).toLowerCase(); + if (parentName === 'mingw64' || parentName === 'mingw32') { + // .../Git/mingw64/bin/git.exe -> .../Git + gitRoot = path.dirname(parent); + } else { + // .../Git/bin/git.exe -> .../Git + gitRoot = parent; + } + } else { + // Unknown structure - try to find 'bin' sibling + gitRoot = path.dirname(gitDir); + } + + // Bash.exe is in Git/bin/bash.exe + const bashPath = path.join(gitRoot, 'bin', 'bash.exe'); + + if (existsSync(bashPath)) { + console.log('[AgentProcess] Derived git-bash path:', bashPath); + return bashPath; + } + + // Fallback: check one level up if gitRoot didn't work + const altBashPath = path.join(path.dirname(gitRoot), 'bin', 'bash.exe'); + if (existsSync(altBashPath)) { + console.log('[AgentProcess] Found git-bash at alternate path:', altBashPath); + return altBashPath; + } + + console.warn('[AgentProcess] Could not find bash.exe from git path:', gitExePath); + return null; + } catch (error) { + console.error('[AgentProcess] Error deriving git-bash path:', error); + return null; + } +} /** * Process spawning and lifecycle management @@ -53,8 +112,31 @@ export class AgentProcessManager { extraEnv: Record ): NodeJS.ProcessEnv { const profileEnv = getProfileEnv(); + // Use getAugmentedEnv() to ensure common tool paths 
(dotnet, homebrew, etc.) + // are available even when app is launched from Finder/Dock + const augmentedEnv = getAugmentedEnv(); + + // On Windows, detect and pass git-bash path for Claude Code CLI + // Electron can detect git via where.exe, but Python subprocess may not have the same PATH + const gitBashEnv: Record = {}; + if (process.platform === 'win32' && !process.env.CLAUDE_CODE_GIT_BASH_PATH) { + try { + const gitInfo = getToolInfo('git'); + if (gitInfo.found && gitInfo.path) { + const bashPath = deriveGitBashPath(gitInfo.path); + if (bashPath) { + gitBashEnv['CLAUDE_CODE_GIT_BASH_PATH'] = bashPath; + console.log('[AgentProcess] Setting CLAUDE_CODE_GIT_BASH_PATH:', bashPath); + } + } + } catch (error) { + console.warn('[AgentProcess] Failed to detect git-bash path:', error); + } + } + return { - ...process.env, + ...augmentedEnv, + ...gitBashEnv, ...extraEnv, ...profileEnv, PYTHONUNBUFFERED: '1', @@ -195,6 +277,8 @@ export class AgentProcessManager { // Auto-detect from app location (configured path was invalid or not set) const possiblePaths = [ + // Packaged app: backend is in extraResources (process.resourcesPath/backend) + ...(app.isPackaged ? [path.join(process.resourcesPath, 'backend')] : []), // Dev mode: from dist/main -> ../../backend (apps/frontend/out/main -> apps/backend) path.resolve(__dirname, '..', '..', '..', 'backend'), // Alternative: from app root -> apps/backend @@ -238,19 +322,10 @@ export class AgentProcessManager { } /** - * Load environment variables from project's .auto-claude/.env file - * This contains frontend-configured settings like memory/Graphiti configuration + * Parse environment variables from a .env file content. + * Filters out empty values to prevent overriding valid tokens from profiles. */ - private loadProjectEnv(projectPath: string): Record { - // Find project by path to get autoBuildPath - const projects = projectStore.getProjects(); - const project = projects.find((p) => p.path === projectPath); - - if (!project?.autoBuildPath) { - return {}; - } - - const envPath = path.join(projectPath, project.autoBuildPath, '.env'); + private parseEnvFile(envPath: string): Record { if (!existsSync(envPath)) { return {}; } @@ -274,11 +349,14 @@ export class AgentProcessManager { // Remove quotes if present if ((value.startsWith('"') && value.endsWith('"')) || - (value.startsWith("'") && value.endsWith("'"))) { + (value.startsWith("'") && value.endsWith("'"))) { value = value.slice(1, -1); } - envVars[key] = value; + // Skip empty values to prevent overriding valid values from other sources + if (value) { + envVars[key] = value; + } } } @@ -288,6 +366,23 @@ export class AgentProcessManager { } } + /** + * Load environment variables from project's .auto-claude/.env file + * This contains frontend-configured settings like memory/Graphiti configuration + */ + private loadProjectEnv(projectPath: string): Record { + // Find project by path to get autoBuildPath + const projects = projectStore.getProjects(); + const project = projects.find((p) => p.path === projectPath); + + if (!project?.autoBuildPath) { + return {}; + } + + const envPath = path.join(projectPath, project.autoBuildPath, '.env'); + return this.parseEnvFile(envPath); + } + /** * Load environment variables from auto-claude .env file */ @@ -298,50 +393,19 @@ export class AgentProcessManager { } const envPath = path.join(autoBuildSource, '.env'); - if (!existsSync(envPath)) { - return {}; - } - - try { - const envContent = readFileSync(envPath, 'utf-8'); - const envVars: Record = {}; - - // Handle 
both Unix (\n) and Windows (\r\n) line endings - for (const line of envContent.split(/\r?\n/)) { - const trimmed = line.trim(); - // Skip comments and empty lines - if (!trimmed || trimmed.startsWith('#')) { - continue; - } - - const eqIndex = trimmed.indexOf('='); - if (eqIndex > 0) { - const key = trimmed.substring(0, eqIndex).trim(); - let value = trimmed.substring(eqIndex + 1).trim(); - - // Remove quotes if present - if ((value.startsWith('"') && value.endsWith('"')) || - (value.startsWith("'") && value.endsWith("'"))) { - value = value.slice(1, -1); - } - - envVars[key] = value; - } - } - - return envVars; - } catch { - return {}; - } + return this.parseEnvFile(envPath); } - spawnProcess( + /** + * Spawn a Python process for task execution + */ + async spawnProcess( taskId: string, cwd: string, args: string[], extraEnv: Record = {}, processType: ProcessType = 'task-execution' - ): void { + ): Promise { const isSpecRunner = processType === 'spec-creation'; this.killProcess(taskId); @@ -351,13 +415,27 @@ export class AgentProcessManager { // Get Python environment (PYTHONPATH for bundled packages, etc.) const pythonEnv = pythonEnvManager.getPythonEnv(); - // Parse Python command to handle space-separated commands like "py -3" + // Get active API profile environment variables + let apiProfileEnv: Record = {}; + try { + apiProfileEnv = await getAPIProfileEnv(); + } catch (error) { + console.error('[Agent Process] Failed to get API profile env:', error); + // Continue with empty profile env (falls back to OAuth mode) + } + + // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode) + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + + // Parse Python commandto handle space-separated commands like "py -3" const [pythonCommand, pythonBaseArgs] = parsePythonCommand(this.getPythonPath()); const childProcess = spawn(pythonCommand, [...pythonBaseArgs, ...args], { cwd, env: { ...env, // Already includes process.env, extraEnv, profileEnv, PYTHONUNBUFFERED, PYTHONUTF8 - ...pythonEnv // Include Python environment (PYTHONPATH for bundled packages) + ...pythonEnv, // Include Python environment (PYTHONPATH for bundled packages) + ...oauthModeClearVars, // Clear stale ANTHROPIC_* vars when in OAuth mode + ...apiProfileEnv // Include active API profile config (highest priority for ANTHROPIC_* vars) } }); diff --git a/apps/frontend/src/main/agent/agent-queue.ts b/apps/frontend/src/main/agent/agent-queue.ts index 913290b35c..1d18be761b 100644 --- a/apps/frontend/src/main/agent/agent-queue.ts +++ b/apps/frontend/src/main/agent/agent-queue.ts @@ -7,8 +7,9 @@ import { AgentEvents } from './agent-events'; import { AgentProcessManager } from './agent-process'; import { RoadmapConfig } from './types'; import type { IdeationConfig, Idea } from '../../shared/types'; -import { MODEL_ID_MAP } from '../../shared/constants'; import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv } from '../rate-limit-detector'; +import { getAPIProfileEnv } from '../services/profile'; +import { getOAuthModeClearVars } from './env-utils'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; import { parsePythonCommand } from '../python-detector'; import { pythonEnvManager } from '../python-env-manager'; @@ -37,6 +38,40 @@ export class AgentQueueManager { this.emitter = emitter; } + /** + * Ensure Python environment is ready before spawning processes. 
+ * Prevents the race condition where generation starts before dependencies are installed, + * which would cause it to fall back to system Python and fail with ModuleNotFoundError. + * + * @param projectId - The project ID for error event emission + * @param eventType - The error event type to emit on failure + * @returns true if environment is ready, false if initialization failed (error already emitted) + */ + private async ensurePythonEnvReady( + projectId: string, + eventType: 'ideation-error' | 'roadmap-error' + ): Promise { + const autoBuildSource = this.processManager.getAutoBuildSourcePath(); + + if (!pythonEnvManager.isEnvReady()) { + debugLog('[Agent Queue] Python environment not ready, waiting for initialization...'); + if (autoBuildSource) { + const status = await pythonEnvManager.initialize(autoBuildSource); + if (!status.ready) { + debugError('[Agent Queue] Python environment initialization failed:', status.error); + this.emitter.emit(eventType, projectId, `Python environment not ready: ${status.error || 'initialization failed'}`); + return false; + } + debugLog('[Agent Queue] Python environment now ready'); + } else { + debugError('[Agent Queue] Cannot initialize Python - auto-build source not found'); + this.emitter.emit(eventType, projectId, 'Python environment not ready: auto-build source not found'); + return false; + } + } + return true; + } + /** * Start roadmap generation process * @@ -44,14 +79,14 @@ export class AgentQueueManager { * This allows refreshing competitor data independently of the general roadmap refresh. * Use when user explicitly wants new competitor research. */ - startRoadmapGeneration( + async startRoadmapGeneration( projectId: string, projectPath: string, refresh: boolean = false, enableCompetitorAnalysis: boolean = false, refreshCompetitorAnalysis: boolean = false, config?: RoadmapConfig - ): void { + ): Promise { debugLog('[Agent Queue] Starting roadmap generation:', { projectId, projectPath, @@ -94,9 +129,9 @@ export class AgentQueueManager { } // Add model and thinking level from config + // Pass shorthand (opus/sonnet/haiku) - backend resolves using API profile env vars if (config?.model) { - const modelId = MODEL_ID_MAP[config.model] || MODEL_ID_MAP['opus']; - args.push('--model', modelId); + args.push('--model', config.model); } if (config?.thinkingLevel) { args.push('--thinking-level', config.thinkingLevel); @@ -105,18 +140,18 @@ export class AgentQueueManager { debugLog('[Agent Queue] Spawning roadmap process with args:', args); // Use projectId as taskId for roadmap operations - this.spawnRoadmapProcess(projectId, projectPath, args); + await this.spawnRoadmapProcess(projectId, projectPath, args); } /** * Start ideation generation process */ - startIdeationGeneration( + async startIdeationGeneration( projectId: string, projectPath: string, config: IdeationConfig, refresh: boolean = false - ): void { + ): Promise { debugLog('[Agent Queue] Starting ideation generation:', { projectId, projectPath, @@ -170,9 +205,9 @@ export class AgentQueueManager { } // Add model and thinking level from config + // Pass shorthand (opus/sonnet/haiku) - backend resolves using API profile env vars if (config.model) { - const modelId = MODEL_ID_MAP[config.model] || MODEL_ID_MAP['opus']; - args.push('--model', modelId); + args.push('--model', config.model); } if (config.thinkingLevel) { args.push('--thinking-level', config.thinkingLevel); @@ -181,19 +216,28 @@ export class AgentQueueManager { debugLog('[Agent Queue] Spawning ideation process with args:', args); 
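(Editorial note: as a usage illustration of the readiness guard introduced above, here is a minimal, self-contained sketch of the intended call pattern: check readiness, initialize if needed, and abort the spawn on failure. The interface and function names are illustrative stand-ins for pythonEnvManager and the spawn helpers in this diff, not the project's actual code.)

```typescript
// Minimal sketch of the "ensure env ready before spawn" pattern (illustrative names).
interface EnvStatus {
  ready: boolean;
  error?: string;
}

interface EnvManager {
  isEnvReady(): boolean;
  initialize(sourcePath: string): Promise<EnvStatus>;
}

async function spawnWhenReady(
  envManager: EnvManager,
  autoBuildSource: string,
  spawnProcess: (args: string[]) => void
): Promise<boolean> {
  if (!envManager.isEnvReady()) {
    // Wait for dependency installation instead of falling back to system Python.
    const status = await envManager.initialize(autoBuildSource);
    if (!status.ready) {
      // The real code emits an 'ideation-error' / 'roadmap-error' event here.
      console.error('Python environment not ready:', status.error ?? 'initialization failed');
      return false;
    }
  }
  spawnProcess(['--model', 'opus']); // args are purely illustrative
  return true;
}
```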
// Use projectId as taskId for ideation operations - this.spawnIdeationProcess(projectId, projectPath, args); + await this.spawnIdeationProcess(projectId, projectPath, args); } /** * Spawn a Python process for ideation generation */ - private spawnIdeationProcess( + private async spawnIdeationProcess( projectId: string, projectPath: string, args: string[] - ): void { + ): Promise { debugLog('[Agent Queue] Spawning ideation process:', { projectId, projectPath }); + // Run from auto-claude source directory so imports work correctly + const autoBuildSource = this.processManager.getAutoBuildSourcePath(); + const cwd = autoBuildSource || process.cwd(); + + // Ensure Python environment is ready before spawning + if (!await this.ensurePythonEnvReady(projectId, 'ideation-error')) { + return; + } + // Kill existing process for this project if any const wasKilled = this.processManager.killProcess(projectId); if (wasKilled) { @@ -204,9 +248,6 @@ export class AgentQueueManager { const spawnId = this.state.generateSpawnId(); debugLog('[Agent Queue] Generated spawn ID:', spawnId); - // Run from auto-claude source directory so imports work correctly - const autoBuildSource = this.processManager.getAutoBuildSourcePath(); - const cwd = autoBuildSource || process.cwd(); // Get combined environment variables const combinedEnv = this.processManager.getCombinedEnv(projectPath); @@ -214,6 +255,12 @@ export class AgentQueueManager { // Get active Claude profile environment (CLAUDE_CODE_OAUTH_TOKEN if not default) const profileEnv = getProfileEnv(); + // Get active API profile environment variables + const apiProfileEnv = await getAPIProfileEnv(); + + // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode) + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + // Get Python path from process manager (uses venv if configured) const pythonPath = this.processManager.getPythonPath(); @@ -234,28 +281,30 @@ export class AgentQueueManager { // 1. process.env (system) // 2. pythonEnv (bundled packages environment) // 3. combinedEnv (auto-claude/.env for CLI usage) - // 4. profileEnv (Electron app OAuth token - highest priority) - // 5. Our specific overrides + // 4. oauthModeClearVars (clear stale ANTHROPIC_* vars when in OAuth mode) + // 5. profileEnv (Electron app OAuth token) + // 6. apiProfileEnv (Active API profile config - highest priority for ANTHROPIC_* vars) + // 7. Our specific overrides const finalEnv = { ...process.env, ...pythonEnv, ...combinedEnv, + ...oauthModeClearVars, ...profileEnv, + ...apiProfileEnv, PYTHONPATH: combinedPythonPath, PYTHONUNBUFFERED: '1', PYTHONUTF8: '1' }; - // Debug: Show OAuth token source + // Debug: Show OAuth token source (token values intentionally omitted for security - AC4) const tokenSource = profileEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'Electron app profile' : (combinedEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'auto-claude/.env' : 'not found'); - const oauthToken = (finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; - const hasToken = !!oauthToken; + const hasToken = !!(finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; debugLog('[Agent Queue] OAuth token status:', { source: tokenSource, - hasToken, - tokenPreview: hasToken ? oauthToken?.substring(0, 20) + '...' 
: 'none' + hasToken }); // Parse Python command to handle space-separated commands like "py -3" @@ -500,13 +549,22 @@ export class AgentQueueManager { /** * Spawn a Python process for roadmap generation */ - private spawnRoadmapProcess( + private async spawnRoadmapProcess( projectId: string, projectPath: string, args: string[] - ): void { + ): Promise { debugLog('[Agent Queue] Spawning roadmap process:', { projectId, projectPath }); + // Run from auto-claude source directory so imports work correctly + const autoBuildSource = this.processManager.getAutoBuildSourcePath(); + const cwd = autoBuildSource || process.cwd(); + + // Ensure Python environment is ready before spawning + if (!await this.ensurePythonEnvReady(projectId, 'roadmap-error')) { + return; + } + // Kill existing process for this project if any const wasKilled = this.processManager.killProcess(projectId); if (wasKilled) { @@ -517,9 +575,6 @@ export class AgentQueueManager { const spawnId = this.state.generateSpawnId(); debugLog('[Agent Queue] Generated roadmap spawn ID:', spawnId); - // Run from auto-claude source directory so imports work correctly - const autoBuildSource = this.processManager.getAutoBuildSourcePath(); - const cwd = autoBuildSource || process.cwd(); // Get combined environment variables const combinedEnv = this.processManager.getCombinedEnv(projectPath); @@ -527,6 +582,12 @@ export class AgentQueueManager { // Get active Claude profile environment (CLAUDE_CODE_OAUTH_TOKEN if not default) const profileEnv = getProfileEnv(); + // Get active API profile environment variables + const apiProfileEnv = await getAPIProfileEnv(); + + // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode) + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + // Get Python path from process manager (uses venv if configured) const pythonPath = this.processManager.getPythonPath(); @@ -547,28 +608,30 @@ export class AgentQueueManager { // 1. process.env (system) // 2. pythonEnv (bundled packages environment) // 3. combinedEnv (auto-claude/.env for CLI usage) - // 4. profileEnv (Electron app OAuth token - highest priority) - // 5. Our specific overrides + // 4. oauthModeClearVars (clear stale ANTHROPIC_* vars when in OAuth mode) + // 5. profileEnv (Electron app OAuth token) + // 6. apiProfileEnv (Active API profile config - highest priority for ANTHROPIC_* vars) + // 7. Our specific overrides const finalEnv = { ...process.env, ...pythonEnv, ...combinedEnv, + ...oauthModeClearVars, ...profileEnv, + ...apiProfileEnv, PYTHONPATH: combinedPythonPath, PYTHONUNBUFFERED: '1', PYTHONUTF8: '1' }; - // Debug: Show OAuth token source + // Debug: Show OAuth token source (token values intentionally omitted for security - AC4) const tokenSource = profileEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'Electron app profile' : (combinedEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'auto-claude/.env' : 'not found'); - const oauthToken = (finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; - const hasToken = !!oauthToken; + const hasToken = !!(finalEnv as Record)['CLAUDE_CODE_OAUTH_TOKEN']; debugLog('[Agent Queue] OAuth token status:', { source: tokenSource, - hasToken, - tokenPreview: hasToken ? oauthToken?.substring(0, 20) + '...' 
: 'none' + hasToken }); // Parse Python command to handle space-separated commands like "py -3" diff --git a/apps/frontend/src/main/agent/env-utils.test.ts b/apps/frontend/src/main/agent/env-utils.test.ts new file mode 100644 index 0000000000..6a5d42c54e --- /dev/null +++ b/apps/frontend/src/main/agent/env-utils.test.ts @@ -0,0 +1,134 @@ +/** + * Unit tests for env-utils + * Tests OAuth mode environment variable clearing functionality + */ + +import { describe, it, expect } from 'vitest'; +import { getOAuthModeClearVars } from './env-utils'; + +describe('getOAuthModeClearVars', () => { + describe('OAuth mode (no active API profile)', () => { + it('should return clearing vars when apiProfileEnv is empty', () => { + const result = getOAuthModeClearVars({}); + + expect(result).toEqual({ + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }); + }); + + it('should clear all ANTHROPIC_* environment variables', () => { + const result = getOAuthModeClearVars({}); + + // Verify all known ANTHROPIC_* vars are cleared + expect(result.ANTHROPIC_API_KEY).toBe(''); + expect(result.ANTHROPIC_AUTH_TOKEN).toBe(''); + expect(result.ANTHROPIC_BASE_URL).toBe(''); + expect(result.ANTHROPIC_MODEL).toBe(''); + expect(result.ANTHROPIC_DEFAULT_HAIKU_MODEL).toBe(''); + expect(result.ANTHROPIC_DEFAULT_SONNET_MODEL).toBe(''); + expect(result.ANTHROPIC_DEFAULT_OPUS_MODEL).toBe(''); + }); + }); + + describe('API Profile mode (active profile)', () => { + it('should return empty object when apiProfileEnv has values', () => { + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-active-profile', + ANTHROPIC_BASE_URL: 'https://custom.api.com' + }; + + const result = getOAuthModeClearVars(apiProfileEnv); + + expect(result).toEqual({}); + }); + + it('should NOT clear vars when API profile is active', () => { + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-test', + ANTHROPIC_BASE_URL: 'https://test.com', + ANTHROPIC_MODEL: 'claude-3-opus' + }; + + const result = getOAuthModeClearVars(apiProfileEnv); + + // Should not return any clearing vars + expect(Object.keys(result)).toHaveLength(0); + }); + + it('should detect non-empty profile even with single property', () => { + const apiProfileEnv = { + ANTHROPIC_AUTH_TOKEN: 'sk-minimal' + }; + + const result = getOAuthModeClearVars(apiProfileEnv); + + expect(result).toEqual({}); + }); + }); + + describe('Edge cases', () => { + it('should handle undefined gracefully (treat as empty)', () => { + // TypeScript should prevent this, but runtime safety + const result = getOAuthModeClearVars(undefined as any); + + // Should treat undefined as empty object -> OAuth mode + expect(result).toBeDefined(); + }); + + it('should handle null gracefully (treat as empty)', () => { + // Runtime safety for null values + const result = getOAuthModeClearVars(null as any); + + // Should treat null as OAuth mode and return clearing vars + expect(result).toEqual({ + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }); + }); + + it('should return consistent object shape for OAuth mode', () => { + const result1 = getOAuthModeClearVars({}); + const result2 = getOAuthModeClearVars({}); + + expect(result1).toEqual(result2); + // Use specific expected keys instead of magic number + const 
expectedKeys = [ + 'ANTHROPIC_API_KEY', + 'ANTHROPIC_AUTH_TOKEN', + 'ANTHROPIC_BASE_URL', + 'ANTHROPIC_MODEL', + 'ANTHROPIC_DEFAULT_HAIKU_MODEL', + 'ANTHROPIC_DEFAULT_SONNET_MODEL', + 'ANTHROPIC_DEFAULT_OPUS_MODEL' + ]; + expect(Object.keys(result1).sort()).toEqual(expectedKeys.sort()); + }); + + it('should still clear if apiProfileEnv has non-ANTHROPIC keys only', () => { + // Edge case: service returns metadata but no ANTHROPIC_* vars + const result = getOAuthModeClearVars({ SOME_OTHER_VAR: 'value' }); + + // Should treat as OAuth mode since no ANTHROPIC_* keys present + expect(result).toEqual({ + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }); + }); + }); +}); diff --git a/apps/frontend/src/main/agent/env-utils.ts b/apps/frontend/src/main/agent/env-utils.ts new file mode 100644 index 0000000000..ba384dfa01 --- /dev/null +++ b/apps/frontend/src/main/agent/env-utils.ts @@ -0,0 +1,44 @@ +/** + * Utility functions for managing environment variables in agent spawning + */ + +/** + * Get environment variables to clear ANTHROPIC_* vars when in OAuth mode + * + * When switching from API Profile mode to OAuth mode, residual ANTHROPIC_* + * environment variables from process.env can cause authentication failures. + * This function returns an object with empty strings for these vars when + * no API profile is active, ensuring OAuth tokens are used correctly. + * + * **Why empty strings?** Setting environment variables to empty strings (rather than + * undefined) ensures they override any stale values from process.env. Python's SDK + * treats empty strings as falsy in conditional checks like `if token:`, so empty + * strings effectively disable these authentication parameters without leaving + * undefined values that might be ignored during object spreading. + * + * @param apiProfileEnv - Environment variables from getAPIProfileEnv() + * @returns Object with empty ANTHROPIC_* vars if in OAuth mode, empty object otherwise + */ +export function getOAuthModeClearVars(apiProfileEnv: Record<string, string>): Record<string, string> { + // If API profile is active (has ANTHROPIC_* vars), don't clear anything + if (apiProfileEnv && Object.keys(apiProfileEnv).some(key => key.startsWith('ANTHROPIC_'))) { + return {}; + } + + // In OAuth mode (no API profile), clear all ANTHROPIC_* vars + // Setting to empty string ensures they override any values from process.env + // Python's `if token:` checks treat empty strings as falsy + // + // IMPORTANT: ANTHROPIC_API_KEY is included to prevent Claude Code from using + // API keys that may be present in the shell environment instead of OAuth tokens. + // Without clearing this, Claude Code would show "Claude API" instead of "Claude Max". 
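(Editorial note: to make the spread-order precedence described in these comments concrete, here is a small illustrative sketch, not the project's code. The import path follows this diff; the values are invented. The clearing object is spread before the profile values, so an active API profile wins while OAuth mode leaves the blanks in place.)

```typescript
// Illustrative sketch only: later spreads win, so order encodes precedence.
import { getOAuthModeClearVars } from './env-utils'; // path as introduced in this diff

const apiProfileEnv: Record<string, string> = {}; // empty => OAuth mode (no active API profile)
const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);

const finalEnv: Record<string, string | undefined> = {
  ...process.env,          // may still carry a stale ANTHROPIC_API_KEY
  ...oauthModeClearVars,   // blanks ANTHROPIC_* vars when in OAuth mode
  ...apiProfileEnv,        // active profile values (highest priority) when present
  PYTHONUNBUFFERED: '1'
};

// In OAuth mode finalEnv.ANTHROPIC_API_KEY === '', which the backend treats as unset,
// so the profile's CLAUDE_CODE_OAUTH_TOKEN is used for authentication instead.
```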
+ return { + ANTHROPIC_API_KEY: '', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: '', + ANTHROPIC_MODEL: '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: '', + ANTHROPIC_DEFAULT_SONNET_MODEL: '', + ANTHROPIC_DEFAULT_OPUS_MODEL: '' + }; +} diff --git a/apps/frontend/src/main/agent/types.ts b/apps/frontend/src/main/agent/types.ts index fa3c5b8d9d..8d9eb9fcc9 100644 --- a/apps/frontend/src/main/agent/types.ts +++ b/apps/frontend/src/main/agent/types.ts @@ -44,6 +44,7 @@ export interface TaskExecutionOptions { parallel?: boolean; workers?: number; baseBranch?: string; + useWorktree?: boolean; // If false, use --direct mode (no worktree isolation) } export interface SpecCreationMetadata { @@ -65,6 +66,8 @@ export interface SpecCreationMetadata { // Non-auto profile - single model and thinking level model?: 'haiku' | 'sonnet' | 'opus'; thinkingLevel?: 'none' | 'low' | 'medium' | 'high' | 'ultrathink'; + // Workspace mode - whether to use worktree isolation + useWorktree?: boolean; // If false, use --direct mode (no worktree isolation) } export interface IdeationProgressData { diff --git a/apps/frontend/src/main/app-updater.ts b/apps/frontend/src/main/app-updater.ts index a76444dd3b..98f1f824bf 100644 --- a/apps/frontend/src/main/app-updater.ts +++ b/apps/frontend/src/main/app-updater.ts @@ -18,12 +18,16 @@ */ import { autoUpdater } from 'electron-updater'; -import { app } from 'electron'; +import { app, net } from 'electron'; import type { BrowserWindow } from 'electron'; import { IPC_CHANNELS } from '../shared/constants'; import type { AppUpdateInfo } from '../shared/types'; import { compareVersions } from './updater/version-manager'; +// GitHub repo info for API calls +const GITHUB_OWNER = 'AndyMik90'; +const GITHUB_REPO = 'Auto-Claude'; + // Debug mode - DEBUG_UPDATER=true or development mode const DEBUG_UPDATER = process.env.DEBUG_UPDATER === 'true' || process.env.NODE_ENV === 'development'; @@ -251,3 +255,214 @@ export function quitAndInstall(): void { export function getCurrentVersion(): string { return autoUpdater.currentVersion.version; } + +/** + * Check if a version string represents a prerelease (beta, alpha, rc, etc.) + */ +export function isPrerelease(version: string): boolean { + return /-(alpha|beta|rc|dev|canary)\.\d+$/i.test(version) || version.includes('-'); +} + +// Timeout for GitHub API requests (10 seconds) +const GITHUB_API_TIMEOUT = 10000; + +/** + * Fetch the latest stable release from GitHub API + * Returns the latest non-prerelease version + */ +async function fetchLatestStableRelease(): Promise { + const fetchPromise = new Promise((resolve) => { + const url = `https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPO}/releases`; + console.warn('[app-updater] Fetching releases from:', url); + + const request = net.request({ + url, + method: 'GET' + }); + + request.setHeader('Accept', 'application/vnd.github.v3+json'); + request.setHeader('User-Agent', `Auto-Claude/${getCurrentVersion()}`); + + let data = ''; + + request.on('response', (response) => { + // Validate HTTP status code + const statusCode = response.statusCode; + if (statusCode !== 200) { + // Sanitize statusCode to prevent log injection + // Convert to number and validate range to ensure it's a valid HTTP status code + const numericCode = Number(statusCode); + const safeStatusCode = (Number.isInteger(numericCode) && numericCode >= 100 && numericCode < 600) + ? 
String(numericCode) + : 'unknown'; + console.error(`[app-updater] GitHub API error: HTTP ${safeStatusCode}`); + if (statusCode === 403) { + console.error('[app-updater] Rate limit may have been exceeded'); + } else if (statusCode === 404) { + console.error('[app-updater] Repository or releases not found'); + } + resolve(null); + return; + } + + response.on('data', (chunk) => { + data += chunk.toString(); + }); + + response.on('end', () => { + try { + const parsed = JSON.parse(data); + + // Validate response is an array + if (!Array.isArray(parsed)) { + console.error('[app-updater] Unexpected response format - expected array, got:', typeof parsed); + resolve(null); + return; + } + + const releases = parsed as Array<{ + tag_name: string; + prerelease: boolean; + draft: boolean; + body?: string; + published_at?: string; + html_url?: string; + }>; + + // Find the first non-prerelease, non-draft release + const latestStable = releases.find(r => !r.prerelease && !r.draft); + + if (!latestStable) { + console.warn('[app-updater] No stable release found'); + resolve(null); + return; + } + + const version = latestStable.tag_name.replace(/^v/, ''); + // Sanitize version string for logging (remove control characters and limit length) + // eslint-disable-next-line no-control-regex + const safeVersion = String(version).replace(/[\x00-\x1f\x7f]/g, '').slice(0, 50); + console.warn('[app-updater] Found latest stable release:', safeVersion); + + resolve({ + version, + releaseNotes: latestStable.body, + releaseDate: latestStable.published_at + }); + } catch (e) { + // Sanitize error message for logging (prevent log injection from malformed JSON) + const safeError = e instanceof Error ? e.message : 'Unknown parse error'; + console.error('[app-updater] Failed to parse releases JSON:', safeError); + resolve(null); + } + }); + }); + + request.on('error', (error) => { + // Sanitize error message for logging (use only the message property) + const safeErrorMessage = error instanceof Error ? error.message : 'Unknown error'; + console.error('[app-updater] Failed to fetch releases:', safeErrorMessage); + resolve(null); + }); + + request.end(); + }); + + // Add timeout to prevent hanging indefinitely + const timeoutPromise = new Promise((resolve) => { + setTimeout(() => { + console.error(`[app-updater] GitHub API request timed out after ${GITHUB_API_TIMEOUT}ms`); + resolve(null); + }, GITHUB_API_TIMEOUT); + }); + + return Promise.race([fetchPromise, timeoutPromise]); +} + +/** + * Check if we should offer a downgrade to stable + * Called when user disables beta updates while on a prerelease version + * + * Returns the latest stable version if: + * 1. Current version is a prerelease + * 2. 
A stable version exists + */ +export async function checkForStableDowngrade(): Promise { + const currentVersion = getCurrentVersion(); + + // Only check for downgrade if currently on a prerelease + if (!isPrerelease(currentVersion)) { + console.warn('[app-updater] Current version is not a prerelease, no downgrade needed'); + return null; + } + + console.warn('[app-updater] Current version is prerelease:', currentVersion); + console.warn('[app-updater] Checking for stable version to downgrade to...'); + + const latestStable = await fetchLatestStableRelease(); + + if (!latestStable) { + console.warn('[app-updater] No stable release available for downgrade'); + return null; + } + + console.warn('[app-updater] Stable downgrade available:', latestStable.version); + return latestStable; +} + +/** + * Set update channel with optional downgrade check + * When switching from beta to stable, checks if user should be offered a downgrade + * + * @param channel - The update channel to switch to + * @param triggerDowngradeCheck - Whether to check for stable downgrade (when disabling beta) + */ +export async function setUpdateChannelWithDowngradeCheck( + channel: UpdateChannel, + triggerDowngradeCheck = false +): Promise { + autoUpdater.channel = channel; + console.warn(`[app-updater] Update channel set to: ${channel}`); + + // If switching to stable and downgrade check requested, look for stable version + if (channel === 'latest' && triggerDowngradeCheck) { + const stableVersion = await checkForStableDowngrade(); + + if (stableVersion && mainWindow) { + // Notify the renderer about the available stable downgrade + mainWindow.webContents.send(IPC_CHANNELS.APP_UPDATE_STABLE_DOWNGRADE, stableVersion); + } + + return stableVersion; + } + + return null; +} + +/** + * Download a specific version (for downgrade) + * Uses electron-updater with allowDowngrade enabled to download older stable versions + */ +export async function downloadStableVersion(): Promise { + // Switch to stable channel + autoUpdater.channel = 'latest'; + // Enable downgrade to allow downloading older versions (e.g., stable when on beta) + autoUpdater.allowDowngrade = true; + console.warn('[app-updater] Downloading stable version (allowDowngrade=true)...'); + + try { + // Force a fresh check on the stable channel, then download + const result = await autoUpdater.checkForUpdates(); + if (result) { + await autoUpdater.downloadUpdate(); + } else { + throw new Error('No stable version available for download'); + } + } catch (error) { + console.error('[app-updater] Failed to download stable version:', error); + throw error; + } finally { + // Reset allowDowngrade to prevent unintended downgrades in normal update checks + autoUpdater.allowDowngrade = false; + } +} diff --git a/apps/frontend/src/main/auto-claude-updater.ts b/apps/frontend/src/main/auto-claude-updater.ts deleted file mode 100644 index b19e19855e..0000000000 --- a/apps/frontend/src/main/auto-claude-updater.ts +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Auto Claude Source Updater - * - * Checks GitHub Releases for updates and downloads them. - * GitHub Releases are the single source of truth for versioning. - * - * Update flow: - * 1. Check GitHub Releases API for the latest release - * 2. Compare release tag with current app version - * 3. If update available, download release tarball and apply - * 4. 
Existing project update system handles pushing to individual projects - * - * Versioning: - * - Single source of truth: GitHub Releases - * - Current version: app.getVersion() (from package.json at build time) - * - Latest version: Fetched from GitHub Releases API - * - To release: Create a GitHub release with tag (e.g., v1.2.0) - */ - -// Export types -export type { - GitHubRelease, - AutoBuildUpdateCheck, - AutoBuildUpdateResult, - UpdateProgressCallback, - UpdateMetadata -} from './updater/types'; - -// Export version management -export { getBundledVersion, getEffectiveVersion } from './updater/version-manager'; - -// Export path resolution -export { - getBundledSourcePath, - getEffectiveSourcePath -} from './updater/path-resolver'; - -// Export update checking -export { checkForUpdates } from './updater/update-checker'; - -// Export update installation -export { downloadAndApplyUpdate } from './updater/update-installer'; - -// Export update status -export { - hasPendingSourceUpdate, - getUpdateMetadata -} from './updater/update-status'; diff --git a/apps/frontend/src/main/changelog/generator.ts b/apps/frontend/src/main/changelog/generator.ts index c71af9c3d4..6fa75c06fb 100644 --- a/apps/frontend/src/main/changelog/generator.ts +++ b/apps/frontend/src/main/changelog/generator.ts @@ -13,6 +13,7 @@ import { extractChangelog } from './parser'; import { getCommits, getBranchDiffCommits } from './git-integration'; import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv } from '../rate-limit-detector'; import { parsePythonCommand } from '../python-detector'; +import { getAugmentedEnv } from '../env-utils'; /** * Core changelog generation logic @@ -246,21 +247,9 @@ export class ChangelogGenerator extends EventEmitter { const homeDir = os.homedir(); const isWindows = process.platform === 'win32'; - // Build PATH with platform-appropriate separator and locations - const pathAdditions = isWindows - ? [ - path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude'), - path.join(homeDir, 'AppData', 'Roaming', 'npm'), - path.join(homeDir, '.local', 'bin'), - 'C:\\Program Files\\Claude', - 'C:\\Program Files (x86)\\Claude' - ] - : [ - '/usr/local/bin', - '/opt/homebrew/bin', - path.join(homeDir, '.local', 'bin'), - path.join(homeDir, 'bin') - ]; + // Use getAugmentedEnv() to ensure common tool paths are available + // even when app is launched from Finder/Dock + const augmentedEnv = getAugmentedEnv(); // Get active Claude profile environment (OAuth token preferred, falls back to CLAUDE_CONFIG_DIR) const profileEnv = getProfileEnv(); @@ -271,15 +260,13 @@ export class ChangelogGenerator extends EventEmitter { }); const spawnEnv: Record = { - ...process.env as Record, + ...augmentedEnv, ...this.autoBuildEnv, ...profileEnv, // Include active Claude profile config // Ensure critical env vars are set for claude CLI // Use USERPROFILE on Windows, HOME on Unix ...(isWindows ? 
{ USERPROFILE: homeDir } : { HOME: homeDir }), USER: process.env.USER || process.env.USERNAME || 'user', - // Add common binary locations to PATH for claude CLI - PATH: [process.env.PATH || '', ...pathAdditions].filter(Boolean).join(path.delimiter), PYTHONUNBUFFERED: '1', PYTHONIOENCODING: 'utf-8', PYTHONUTF8: '1' diff --git a/apps/frontend/src/main/changelog/version-suggester.ts b/apps/frontend/src/main/changelog/version-suggester.ts index 4869fe41ef..6d4a9b9126 100644 --- a/apps/frontend/src/main/changelog/version-suggester.ts +++ b/apps/frontend/src/main/changelog/version-suggester.ts @@ -1,9 +1,9 @@ import { spawn } from 'child_process'; -import * as path from 'path'; import * as os from 'os'; import type { GitCommit } from '../../shared/types'; import { getProfileEnv } from '../rate-limit-detector'; import { parsePythonCommand } from '../python-detector'; +import { getAugmentedEnv } from '../env-utils'; interface VersionSuggestion { version: string; @@ -215,31 +215,19 @@ except Exception as e: const homeDir = os.homedir(); const isWindows = process.platform === 'win32'; - // Build PATH with platform-appropriate separator and locations - const pathAdditions = isWindows - ? [ - path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude'), - path.join(homeDir, 'AppData', 'Roaming', 'npm'), - path.join(homeDir, '.local', 'bin'), - 'C:\\Program Files\\Claude', - 'C:\\Program Files (x86)\\Claude' - ] - : [ - '/usr/local/bin', - '/opt/homebrew/bin', - path.join(homeDir, '.local', 'bin'), - path.join(homeDir, 'bin') - ]; + // Use getAugmentedEnv() to ensure common tool paths are available + // even when app is launched from Finder/Dock + const augmentedEnv = getAugmentedEnv(); // Get active Claude profile environment const profileEnv = getProfileEnv(); const spawnEnv: Record = { - ...process.env as Record, + ...augmentedEnv, ...profileEnv, + // Ensure critical env vars are set for claude CLI ...(isWindows ? { USERPROFILE: homeDir } : { HOME: homeDir }), USER: process.env.USER || process.env.USERNAME || 'user', - PATH: [process.env.PATH || '', ...pathAdditions].filter(Boolean).join(path.delimiter), PYTHONUNBUFFERED: '1', PYTHONIOENCODING: 'utf-8', PYTHONUTF8: '1' diff --git a/apps/frontend/src/main/claude-cli-utils.ts b/apps/frontend/src/main/claude-cli-utils.ts new file mode 100644 index 0000000000..49a0c49c71 --- /dev/null +++ b/apps/frontend/src/main/claude-cli-utils.ts @@ -0,0 +1,77 @@ +import path from 'path'; +import { getAugmentedEnv, getAugmentedEnvAsync } from './env-utils'; +import { getToolPath, getToolPathAsync } from './cli-tool-manager'; + +export type ClaudeCliInvocation = { + command: string; + env: Record; +}; + +function ensureCommandDirInPath(command: string, env: Record): Record { + if (!path.isAbsolute(command)) { + return env; + } + + const pathSeparator = process.platform === 'win32' ? ';' : ':'; + const commandDir = path.dirname(command); + const currentPath = env.PATH || ''; + const pathEntries = currentPath.split(pathSeparator); + const normalizedCommandDir = path.normalize(commandDir); + const hasCommandDir = process.platform === 'win32' + ? 
pathEntries + .map((entry) => path.normalize(entry).toLowerCase()) + .includes(normalizedCommandDir.toLowerCase()) + : pathEntries + .map((entry) => path.normalize(entry)) + .includes(normalizedCommandDir); + + if (hasCommandDir) { + return env; + } + + return { + ...env, + PATH: [commandDir, currentPath].filter(Boolean).join(pathSeparator), + }; +} + +/** + * Returns the Claude CLI command path and an environment with PATH updated to include the CLI directory. + * + * WARNING: This function uses synchronous subprocess calls that block the main process. + * For use in Electron main process, prefer getClaudeCliInvocationAsync() instead. + */ +export function getClaudeCliInvocation(): ClaudeCliInvocation { + const command = getToolPath('claude'); + const env = getAugmentedEnv(); + + return { + command, + env: ensureCommandDirInPath(command, env), + }; +} + +/** + * Returns the Claude CLI command path and environment asynchronously (non-blocking). + * + * Safe to call from Electron main process without blocking the event loop. + * Uses cached values if available for instant response. + * + * @example + * ```typescript + * const { command, env } = await getClaudeCliInvocationAsync(); + * spawn(command, ['--version'], { env }); + * ``` + */ +export async function getClaudeCliInvocationAsync(): Promise { + // Run both detections in parallel for efficiency + const [command, env] = await Promise.all([ + getToolPathAsync('claude'), + getAugmentedEnvAsync(), + ]); + + return { + command, + env: ensureCommandDirInPath(command, env), + }; +} diff --git a/apps/frontend/src/main/claude-profile-manager.ts b/apps/frontend/src/main/claude-profile-manager.ts index 0f9c88f6d6..f64ef42d81 100644 --- a/apps/frontend/src/main/claude-profile-manager.ts +++ b/apps/frontend/src/main/claude-profile-manager.ts @@ -13,7 +13,7 @@ import { app } from 'electron'; import { join } from 'path'; -import { existsSync, mkdirSync } from 'fs'; +import { mkdir } from 'fs/promises'; import type { ClaudeProfile, ClaudeProfileSettings, @@ -32,6 +32,7 @@ import { } from './claude-profile/rate-limit-manager'; import { loadProfileStore, + loadProfileStoreAsync, saveProfileStore, ProfileStoreData, DEFAULT_AUTO_SWITCH_SETTINGS @@ -57,19 +58,45 @@ import { */ export class ClaudeProfileManager { private storePath: string; + private configDir: string; private data: ProfileStoreData; + private initialized: boolean = false; constructor() { - const configDir = join(app.getPath('userData'), 'config'); - this.storePath = join(configDir, 'claude-profiles.json'); + this.configDir = join(app.getPath('userData'), 'config'); + this.storePath = join(this.configDir, 'claude-profiles.json'); - // Ensure directory exists - if (!existsSync(configDir)) { - mkdirSync(configDir, { recursive: true }); + // DON'T do file I/O here - defer to async initialize() + // Start with default data until initialized + this.data = this.createDefaultData(); + } + + /** + * Initialize the profile manager asynchronously (non-blocking) + * This should be called at app startup via initializeClaudeProfileManager() + */ + async initialize(): Promise { + if (this.initialized) return; + + // Ensure directory exists (async) - mkdir with recursive:true is idempotent + await mkdir(this.configDir, { recursive: true }); + + // Load existing data asynchronously + const loadedData = await loadProfileStoreAsync(this.storePath); + if (loadedData) { + this.data = loadedData; } + // else: keep the default data from constructor + + this.initialized = true; + 
console.warn('[ClaudeProfileManager] Initialized asynchronously'); + } - // Load existing data or initialize with default profile - this.data = this.load(); + /** + * Check if the profile manager has been initialized + */ + isInitialized(): boolean { + return this.initialized; } /** @@ -522,11 +549,13 @@ export class ClaudeProfileManager { } } -// Singleton instance +// Singleton instance and initialization promise let profileManager: ClaudeProfileManager | null = null; +let initPromise: Promise | null = null; /** * Get the singleton Claude profile manager instance + * Note: For async contexts, prefer initializeClaudeProfileManager() to ensure initialization */ export function getClaudeProfileManager(): ClaudeProfileManager { if (!profileManager) { @@ -534,3 +563,28 @@ export function getClaudeProfileManager(): ClaudeProfileManager { } return profileManager; } + +/** + * Initialize and get the singleton Claude profile manager instance (async) + * This ensures the profile manager is fully initialized before use. + * Uses promise caching to prevent concurrent initialization. + */ +export async function initializeClaudeProfileManager(): Promise { + if (!profileManager) { + profileManager = new ClaudeProfileManager(); + } + + // If already initialized, return immediately + if (profileManager.isInitialized()) { + return profileManager; + } + + // If initialization is in progress, wait for it (promise caching) + if (!initPromise) { + initPromise = profileManager.initialize().then(() => { + return profileManager!; + }); + } + + return initPromise; +} diff --git a/apps/frontend/src/main/claude-profile/profile-storage.ts b/apps/frontend/src/main/claude-profile/profile-storage.ts index bd5b89c372..a4c825e2f2 100644 --- a/apps/frontend/src/main/claude-profile/profile-storage.ts +++ b/apps/frontend/src/main/claude-profile/profile-storage.ts @@ -4,6 +4,7 @@ */ import { existsSync, readFileSync, writeFileSync } from 'fs'; +import { readFile } from 'fs/promises'; import type { ClaudeProfile, ClaudeAutoSwitchSettings } from '../../shared/types'; export const STORE_VERSION = 3; // Bumped for encrypted token storage @@ -30,6 +31,42 @@ export interface ProfileStoreData { autoSwitch?: ClaudeAutoSwitchSettings; } +/** + * Parse and migrate profile data from JSON. + * Handles version migration and date parsing. + * Shared helper used by both sync and async loaders. + */ +function parseAndMigrateProfileData(data: Record): ProfileStoreData | null { + // Handle version migration + if (data.version === 1) { + // Migrate v1 to v2: add usage and rateLimitEvents fields + data.version = STORE_VERSION; + data.autoSwitch = DEFAULT_AUTO_SWITCH_SETTINGS; + } + + if (data.version === STORE_VERSION) { + // Parse dates + const profiles = data.profiles as ClaudeProfile[]; + data.profiles = profiles.map((p: ClaudeProfile) => ({ + ...p, + createdAt: new Date(p.createdAt), + lastUsedAt: p.lastUsedAt ? new Date(p.lastUsedAt) : undefined, + usage: p.usage ? 
{ + ...p.usage, + lastUpdated: new Date(p.usage.lastUpdated) + } : undefined, + rateLimitEvents: p.rateLimitEvents?.map(e => ({ + ...e, + hitAt: new Date(e.hitAt), + resetAt: new Date(e.resetAt) + })) + })); + return data as unknown as ProfileStoreData; + } + + return null; +} + /** * Load profiles from disk */ @@ -38,32 +75,7 @@ export function loadProfileStore(storePath: string): ProfileStoreData | null { if (existsSync(storePath)) { const content = readFileSync(storePath, 'utf-8'); const data = JSON.parse(content); - - // Handle version migration - if (data.version === 1) { - // Migrate v1 to v2: add usage and rateLimitEvents fields - data.version = STORE_VERSION; - data.autoSwitch = DEFAULT_AUTO_SWITCH_SETTINGS; - } - - if (data.version === STORE_VERSION) { - // Parse dates - data.profiles = data.profiles.map((p: ClaudeProfile) => ({ - ...p, - createdAt: new Date(p.createdAt), - lastUsedAt: p.lastUsedAt ? new Date(p.lastUsedAt) : undefined, - usage: p.usage ? { - ...p.usage, - lastUpdated: new Date(p.usage.lastUpdated) - } : undefined, - rateLimitEvents: p.rateLimitEvents?.map(e => ({ - ...e, - hitAt: new Date(e.hitAt), - resetAt: new Date(e.resetAt) - })) - })); - return data; - } + return parseAndMigrateProfileData(data); } } catch (error) { console.error('[ProfileStorage] Error loading profiles:', error); @@ -72,6 +84,27 @@ export function loadProfileStore(storePath: string): ProfileStoreData | null { return null; } +/** + * Load profiles from disk (async, non-blocking) + * Use this version for initialization to avoid blocking the main process. + */ +export async function loadProfileStoreAsync(storePath: string): Promise { + try { + // Read file directly - avoid TOCTOU race condition by not checking existence first + // If file doesn't exist, readFile will throw ENOENT which we handle below + const content = await readFile(storePath, 'utf-8'); + const data = JSON.parse(content); + return parseAndMigrateProfileData(data); + } catch (error) { + // ENOENT is expected if file doesn't exist yet + if ((error as NodeJS.ErrnoException).code !== 'ENOENT') { + console.error('[ProfileStorage] Error loading profiles:', error); + } + } + + return null; +} + /** * Save profiles to disk */ diff --git a/apps/frontend/src/main/claude-profile/profile-utils.ts b/apps/frontend/src/main/claude-profile/profile-utils.ts index 557d8fae0e..80a3c048cb 100644 --- a/apps/frontend/src/main/claude-profile/profile-utils.ts +++ b/apps/frontend/src/main/claude-profile/profile-utils.ts @@ -56,7 +56,7 @@ export async function createProfileDirectory(profileName: string): Promise { + try { + await fsPromises.access(filePath); + return true; + } catch { + return false; + } +} import type { ToolDetectionResult } from '../shared/types'; import { findHomebrewPython as findHomebrewPythonUtil } from './utils/homebrew-python'; +import { + getWindowsExecutablePaths, + getWindowsExecutablePathsAsync, + WINDOWS_GIT_PATHS, + findWindowsExecutableViaWhere, + findWindowsExecutableViaWhereAsync, +} from './utils/windows-paths'; /** * Supported CLI tools managed by this system @@ -103,6 +130,139 @@ function isWrongPlatformPath(pathStr: string | undefined): boolean { return false; } +// ============================================================================ +// SHARED HELPERS - Used by both sync and async Claude detection +// ============================================================================ + +/** + * Configuration for Claude CLI detection paths + */ +interface ClaudeDetectionPaths { + /** Homebrew paths for macOS 
(Apple Silicon and Intel) */ + homebrewPaths: string[]; + /** Platform-specific standard installation paths */ + platformPaths: string[]; + /** Path to NVM versions directory for Node.js-installed Claude */ + nvmVersionsDir: string; +} + +/** + * Get all candidate paths for Claude CLI detection. + * + * Returns platform-specific paths where Claude CLI might be installed. + * This pure function consolidates path configuration used by both sync + * and async detection methods. + * + * @param homeDir - User's home directory (from os.homedir()) + * @returns Object containing homebrew, platform, and NVM paths + * + * @example + * const paths = getClaudeDetectionPaths('/Users/john'); + * // On macOS: { homebrewPaths: ['/opt/homebrew/bin/claude', ...], ... } + */ +export function getClaudeDetectionPaths(homeDir: string): ClaudeDetectionPaths { + const homebrewPaths = [ + '/opt/homebrew/bin/claude', // Apple Silicon + '/usr/local/bin/claude', // Intel Mac + ]; + + const platformPaths = process.platform === 'win32' + ? [ + path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'), + path.join(homeDir, 'AppData', 'Roaming', 'npm', 'claude.cmd'), + path.join(homeDir, '.local', 'bin', 'claude.exe'), + 'C:\\Program Files\\Claude\\claude.exe', + 'C:\\Program Files (x86)\\Claude\\claude.exe', + ] + : [ + path.join(homeDir, '.local', 'bin', 'claude'), + path.join(homeDir, 'bin', 'claude'), + ]; + + const nvmVersionsDir = path.join(homeDir, '.nvm', 'versions', 'node'); + + return { homebrewPaths, platformPaths, nvmVersionsDir }; +} + +/** + * Sort NVM version directories by semantic version (newest first). + * + * Filters entries to only include directories starting with 'v' (version directories) + * and sorts them in descending order so the newest Node.js version is checked first. + * + * @param entries - Directory entries from readdir with { name, isDirectory() } + * @returns Array of version directory names sorted newest first + * + * @example + * const entries = [ + * { name: 'v18.0.0', isDirectory: () => true }, + * { name: 'v20.0.0', isDirectory: () => true }, + * { name: '.DS_Store', isDirectory: () => false }, + * ]; + * sortNvmVersionDirs(entries); // ['v20.0.0', 'v18.0.0'] + */ +export function sortNvmVersionDirs( + entries: Array<{ name: string; isDirectory(): boolean }> +): string[] { + // Regex to match valid semver directories: v20.0.0, v18.17.1, etc. + // This prevents NaN from malformed versions (e.g., v20.abc.1) breaking sort + const semverRegex = /^v\d+\.\d+\.\d+$/; + + return entries + .filter((entry) => entry.isDirectory() && semverRegex.test(entry.name)) + .sort((a, b) => { + // Parse version numbers: v20.0.0 -> [20, 0, 0] + const vA = a.name.slice(1).split('.').map(Number); + const vB = b.name.slice(1).split('.').map(Number); + // Compare major, minor, patch in order (descending) + for (let i = 0; i < 3; i++) { + const diff = (vB[i] ?? 0) - (vA[i] ?? 0); + if (diff !== 0) return diff; + } + return 0; + }) + .map((entry) => entry.name); +} + +/** + * Build a ToolDetectionResult from a validation result. + * + * Returns null if validation failed, otherwise constructs the full result object. + * This helper consolidates the result-building logic used throughout detection. 
+ * + * @param claudePath - The path that was validated + * @param validation - The validation result from validateClaude/validateClaudeAsync + * @param source - The source of detection ('user-config', 'homebrew', 'system-path', 'nvm') + * @param messagePrefix - Prefix for the success message (e.g., 'Using Homebrew Claude CLI') + * @returns ToolDetectionResult if valid, null if validation failed + * + * @example + * const result = buildClaudeDetectionResult( + * '/opt/homebrew/bin/claude', + * { valid: true, version: '1.0.0', message: 'OK' }, + * 'homebrew', + * 'Using Homebrew Claude CLI' + * ); + * // Returns: { found: true, path: '/opt/homebrew/bin/claude', version: '1.0.0', ... } + */ +export function buildClaudeDetectionResult( + claudePath: string, + validation: ToolValidation, + source: ToolDetectionResult['source'], + messagePrefix: string +): ToolDetectionResult | null { + if (!validation.valid) { + return null; + } + return { + found: true, + path: claudePath, + version: validation.version, + source, + message: `${messagePrefix}: ${claudePath}`, + }; +} + /** * Centralized CLI Tool Manager * @@ -392,7 +552,40 @@ class CLIToolManager { } } - // 4. Not found - fallback to 'git' + // 4. Windows-specific detection using 'where' command (most reliable for custom installs) + if (process.platform === 'win32') { + // First try 'where' command - finds git regardless of installation location + const whereGitPath = findWindowsExecutableViaWhere('git', '[Git]'); + if (whereGitPath) { + const validation = this.validateGit(whereGitPath); + if (validation.valid) { + return { + found: true, + path: whereGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${whereGitPath}`, + }; + } + } + + // Fallback to checking common installation paths + const windowsPaths = getWindowsExecutablePaths(WINDOWS_GIT_PATHS, '[Git]'); + for (const winGitPath of windowsPaths) { + const validation = this.validateGit(winGitPath); + if (validation.valid) { + return { + found: true, + path: winGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${winGitPath}`, + }; + } + } + } + + // 5. Not found - fallback to 'git' return { found: false, source: 'fallback', @@ -517,99 +710,75 @@ class CLIToolManager { * @returns Detection result for Claude CLI */ private detectClaude(): ToolDetectionResult { + const homeDir = os.homedir(); + const paths = getClaudeDetectionPaths(homeDir); + // 1. User configuration if (this.userConfig.claudePath) { - // Check if path is from wrong platform (e.g., Windows path on macOS) if (isWrongPlatformPath(this.userConfig.claudePath)) { console.warn( `[Claude CLI] User-configured path is from different platform, ignoring: ${this.userConfig.claudePath}` ); } else { const validation = this.validateClaude(this.userConfig.claudePath); - if (validation.valid) { - return { - found: true, - path: this.userConfig.claudePath, - version: validation.version, - source: 'user-config', - message: `Using user-configured Claude CLI: ${this.userConfig.claudePath}`, - }; - } - console.warn( - `[Claude CLI] User-configured path invalid: ${validation.message}` + const result = buildClaudeDetectionResult( + this.userConfig.claudePath, validation, 'user-config', 'Using user-configured Claude CLI' ); + if (result) return result; + console.warn(`[Claude CLI] User-configured path invalid: ${validation.message}`); } } // 2. 
Homebrew (macOS) if (process.platform === 'darwin') { - const homebrewPaths = [ - '/opt/homebrew/bin/claude', // Apple Silicon - '/usr/local/bin/claude', // Intel Mac - ]; - - for (const claudePath of homebrewPaths) { + for (const claudePath of paths.homebrewPaths) { if (existsSync(claudePath)) { const validation = this.validateClaude(claudePath); - if (validation.valid) { - return { - found: true, - path: claudePath, - version: validation.version, - source: 'homebrew', - message: `Using Homebrew Claude CLI: ${claudePath}`, - }; - } + const result = buildClaudeDetectionResult(claudePath, validation, 'homebrew', 'Using Homebrew Claude CLI'); + if (result) return result; } } } // 3. System PATH (augmented) - const claudePath = findExecutable('claude'); - if (claudePath) { - const validation = this.validateClaude(claudePath); - if (validation.valid) { - return { - found: true, - path: claudePath, - version: validation.version, - source: 'system-path', - message: `Using system Claude CLI: ${claudePath}`, - }; + const systemClaudePath = findExecutable('claude'); + if (systemClaudePath) { + const validation = this.validateClaude(systemClaudePath); + const result = buildClaudeDetectionResult(systemClaudePath, validation, 'system-path', 'Using system Claude CLI'); + if (result) return result; + } + + // 4. NVM paths (Unix only) - check before platform paths for better Node.js integration + if (process.platform !== 'win32') { + try { + if (existsSync(paths.nvmVersionsDir)) { + const nodeVersions = readdirSync(paths.nvmVersionsDir, { withFileTypes: true }); + const versionNames = sortNvmVersionDirs(nodeVersions); + + for (const versionName of versionNames) { + const nvmClaudePath = path.join(paths.nvmVersionsDir, versionName, 'bin', 'claude'); + if (existsSync(nvmClaudePath)) { + const validation = this.validateClaude(nvmClaudePath); + const result = buildClaudeDetectionResult(nvmClaudePath, validation, 'nvm', 'Using NVM Claude CLI'); + if (result) return result; + } + } + } + } catch (error) { + console.warn(`[Claude CLI] Unable to read NVM directory: ${error}`); } } - // 4. Platform-specific standard locations - const homeDir = os.homedir(); - const platformPaths = process.platform === 'win32' - ? [ - path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'), - path.join(homeDir, 'AppData', 'Roaming', 'npm', 'claude.cmd'), - path.join(homeDir, '.local', 'bin', 'claude.exe'), - 'C:\\Program Files\\Claude\\claude.exe', - 'C:\\Program Files (x86)\\Claude\\claude.exe', - ] - : [ - path.join(homeDir, '.local', 'bin', 'claude'), - path.join(homeDir, 'bin', 'claude'), - ]; - - for (const claudePath of platformPaths) { + // 5. Platform-specific standard locations + for (const claudePath of paths.platformPaths) { if (existsSync(claudePath)) { const validation = this.validateClaude(claudePath); - if (validation.valid) { - return { - found: true, - path: claudePath, - version: validation.version, - source: 'system-path', - message: `Using Claude CLI: ${claudePath}`, - }; - } + const result = buildClaudeDetectionResult(claudePath, validation, 'system-path', 'Using Claude CLI'); + if (result) return result; } } - // 5. Not found + // 6. 
Not found return { found: false, source: 'fallback', @@ -759,6 +928,7 @@ class CLIToolManager { timeout: 5000, windowsHide: true, shell: needsShell, + env: getAugmentedEnv(), }).trim(); // Claude CLI version output format: "claude-code version X.Y.Z" or similar @@ -778,116 +948,747 @@ class CLIToolManager { } } + // ============================================================================ + // ASYNC METHODS - Non-blocking alternatives for Electron main process + // ============================================================================ + /** - * Get bundled Python path for packaged apps + * Get the path for a CLI tool asynchronously (non-blocking) * - * Only available in packaged Electron apps where Python is bundled - * in the resources directory. + * Uses cached path if available, otherwise detects asynchronously. + * Safe to call from Electron main process without blocking. * - * @returns Path to bundled Python or null if not found + * @param tool - The CLI tool to get the path for + * @returns Promise resolving to the tool path */ - private getBundledPythonPath(): string | null { - if (!app.isPackaged) { - return null; + async getToolPathAsync(tool: CLITool): Promise { + // Check cache first (instant return if cached) + const cached = this.cache.get(tool); + if (cached) { + console.warn( + `[CLI Tools] Using cached ${tool}: ${cached.path} (${cached.source})` + ); + return cached.path; } - const resourcesPath = process.resourcesPath; - const isWindows = process.platform === 'win32'; - - const pythonPath = isWindows - ? path.join(resourcesPath, 'python', 'python.exe') - : path.join(resourcesPath, 'python', 'bin', 'python3'); + // Detect asynchronously + const result = await this.detectToolPathAsync(tool); + if (result.found && result.path) { + this.cache.set(tool, { + path: result.path, + version: result.version, + source: result.source, + }); + console.warn(`[CLI Tools] Detected ${tool}: ${result.path} (${result.source})`); + return result.path; + } - return existsSync(pythonPath) ? pythonPath : null; + // Fallback to tool name (let system PATH resolve it) + console.warn(`[CLI Tools] ${tool} not found, using fallback: "${tool}"`); + return tool; } /** - * Find Homebrew Python on macOS - * Delegates to shared utility function. + * Detect tool path asynchronously * - * @returns Path to Homebrew Python or null if not found + * All tools now use async detection methods to prevent blocking the main process. + * + * @param tool - The tool to detect + * @returns Promise resolving to detection result */ - private findHomebrewPython(): string | null { - return findHomebrewPythonUtil( - (pythonPath) => this.validatePython(pythonPath), - '[CLI Tools]' - ); + private async detectToolPathAsync(tool: CLITool): Promise { + switch (tool) { + case 'claude': + return this.detectClaudeAsync(); + case 'python': + return this.detectPythonAsync(); + case 'git': + return this.detectGitAsync(); + case 'gh': + return this.detectGitHubCLIAsync(); + default: + return { + found: false, + source: 'fallback', + message: `Unknown tool: ${tool}`, + }; + } } /** - * Clear cache manually + * Validate Claude CLI asynchronously (non-blocking) * - * Useful for testing or forcing re-detection. - * Normally not needed as cache is cleared automatically on settings change. 
+ * @param claudeCmd - The Claude CLI command to validate + * @returns Promise resolving to validation result */ - clearCache(): void { - this.cache.clear(); - console.warn('[CLI Tools] Cache cleared'); + private async validateClaudeAsync(claudeCmd: string): Promise { + try { + const needsShell = process.platform === 'win32' && + (claudeCmd.endsWith('.cmd') || claudeCmd.endsWith('.bat')); + + const { stdout } = await execFileAsync(claudeCmd, ['--version'], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + shell: needsShell, + env: await getAugmentedEnvAsync(), + }); + + const version = stdout.trim(); + const match = version.match(/(\d+\.\d+\.\d+)/); + const versionStr = match ? match[1] : version.split('\n')[0]; + + return { + valid: true, + version: versionStr, + message: `Claude CLI ${versionStr} is available`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate Claude CLI: ${error instanceof Error ? error.message : String(error)}`, + }; + } } /** - * Get tool detection info for diagnostics - * - * Performs fresh detection without using cache. - * Useful for Settings UI to show current detection status. + * Validate Python version asynchronously (non-blocking) * - * @param tool - The tool to get detection info for - * @returns Detection result with full metadata + * @param pythonCmd - The Python command to validate + * @returns Promise resolving to validation result */ - getToolInfo(tool: CLITool): ToolDetectionResult { - return this.detectToolPath(tool); - } -} + private async validatePythonAsync(pythonCmd: string): Promise { + const MINIMUM_VERSION = '3.10.0'; -// Singleton instance -const cliToolManager = new CLIToolManager(); + try { + const parts = pythonCmd.split(' '); + const cmd = parts[0]; + const args = [...parts.slice(1), '--version']; -/** - * Get the path for a CLI tool - * - * Convenience function for accessing the tool manager singleton. - * Uses cached path if available, otherwise auto-detects. - * - * @param tool - The CLI tool to get the path for - * @returns The resolved path to the tool executable - * - * @example - * ```typescript - * import { getToolPath } from './cli-tool-manager'; - * - * const pythonPath = getToolPath('python'); - * const gitPath = getToolPath('git'); - * const ghPath = getToolPath('gh'); - * - * execSync(`${gitPath} status`, { cwd: projectPath }); - * ``` - */ -export function getToolPath(tool: CLITool): string { - return cliToolManager.getToolPath(tool); -} + const { stdout } = await execFileAsync(cmd, args, { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + env: await getAugmentedEnvAsync(), + }); -/** - * Configure CLI tools with user settings - * - * Call this when user updates CLI tool paths in Settings. - * Clears cache to force re-detection with new configuration. 
- * - * @param config - User configuration for CLI tool paths - * - * @example - * ```typescript - * import { configureTools } from './cli-tool-manager'; - * - * // When settings are loaded or updated - * configureTools({ - * pythonPath: settings.pythonPath, - * gitPath: settings.gitPath, - * githubCLIPath: settings.githubCLIPath, - * }); - * ``` - */ -export function configureTools(config: ToolConfig): void { - cliToolManager.configure(config); + const version = stdout.trim(); + const match = version.match(/Python (\d+\.\d+\.\d+)/); + if (!match) { + return { + valid: false, + message: 'Unable to detect Python version', + }; + } + + const versionStr = match[1]; + const [major, minor] = versionStr.split('.').map(Number); + const [reqMajor, reqMinor] = MINIMUM_VERSION.split('.').map(Number); + + const meetsRequirement = + major > reqMajor || (major === reqMajor && minor >= reqMinor); + + if (!meetsRequirement) { + return { + valid: false, + version: versionStr, + message: `Python ${versionStr} is too old. Requires ${MINIMUM_VERSION}+`, + }; + } + + return { + valid: true, + version: versionStr, + message: `Python ${versionStr} meets requirements`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate Python: ${error}`, + }; + } + } + + /** + * Validate Git asynchronously (non-blocking) + * + * @param gitCmd - The Git command to validate + * @returns Promise resolving to validation result + */ + private async validateGitAsync(gitCmd: string): Promise { + try { + const { stdout } = await execFileAsync(gitCmd, ['--version'], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + env: await getAugmentedEnvAsync(), + }); + + const version = stdout.trim(); + const match = version.match(/git version (\d+\.\d+\.\d+)/); + const versionStr = match ? match[1] : version; + + return { + valid: true, + version: versionStr, + message: `Git ${versionStr} is available`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate Git: ${error instanceof Error ? error.message : String(error)}`, + }; + } + } + + /** + * Validate GitHub CLI asynchronously (non-blocking) + * + * @param ghCmd - The GitHub CLI command to validate + * @returns Promise resolving to validation result + */ + private async validateGitHubCLIAsync(ghCmd: string): Promise { + try { + const { stdout } = await execFileAsync(ghCmd, ['--version'], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + env: await getAugmentedEnvAsync(), + }); + + const version = stdout.trim(); + const match = version.match(/gh version (\d+\.\d+\.\d+)/); + const versionStr = match ? match[1] : version.split('\n')[0]; + + return { + valid: true, + version: versionStr, + message: `GitHub CLI ${versionStr} is available`, + }; + } catch (error) { + return { + valid: false, + message: `Failed to validate GitHub CLI: ${error instanceof Error ? error.message : String(error)}`, + }; + } + } + + /** + * Detect Claude CLI asynchronously (non-blocking) + * + * Same detection logic as detectClaude but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectClaudeAsync(): Promise { + const homeDir = os.homedir(); + const paths = getClaudeDetectionPaths(homeDir); + + // 1. 
User configuration + if (this.userConfig.claudePath) { + if (isWrongPlatformPath(this.userConfig.claudePath)) { + console.warn( + `[Claude CLI] User-configured path is from different platform, ignoring: ${this.userConfig.claudePath}` + ); + } else { + const validation = await this.validateClaudeAsync(this.userConfig.claudePath); + const result = buildClaudeDetectionResult( + this.userConfig.claudePath, validation, 'user-config', 'Using user-configured Claude CLI' + ); + if (result) return result; + console.warn(`[Claude CLI] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Homebrew (macOS) + if (process.platform === 'darwin') { + for (const claudePath of paths.homebrewPaths) { + if (await existsAsync(claudePath)) { + const validation = await this.validateClaudeAsync(claudePath); + const result = buildClaudeDetectionResult(claudePath, validation, 'homebrew', 'Using Homebrew Claude CLI'); + if (result) return result; + } + } + } + + // 3. System PATH (augmented) - using async findExecutable + const systemClaudePath = await findExecutableAsync('claude'); + if (systemClaudePath) { + const validation = await this.validateClaudeAsync(systemClaudePath); + const result = buildClaudeDetectionResult(systemClaudePath, validation, 'system-path', 'Using system Claude CLI'); + if (result) return result; + } + + // 4. NVM paths (Unix only) - check before platform paths for better Node.js integration + if (process.platform !== 'win32') { + try { + if (await existsAsync(paths.nvmVersionsDir)) { + const nodeVersions = await fsPromises.readdir(paths.nvmVersionsDir, { withFileTypes: true }); + const versionNames = sortNvmVersionDirs(nodeVersions); + + for (const versionName of versionNames) { + const nvmClaudePath = path.join(paths.nvmVersionsDir, versionName, 'bin', 'claude'); + if (await existsAsync(nvmClaudePath)) { + const validation = await this.validateClaudeAsync(nvmClaudePath); + const result = buildClaudeDetectionResult(nvmClaudePath, validation, 'nvm', 'Using NVM Claude CLI'); + if (result) return result; + } + } + } + } catch (error) { + console.warn(`[Claude CLI] Unable to read NVM directory: ${error}`); + } + } + + // 5. Platform-specific standard locations + for (const claudePath of paths.platformPaths) { + if (await existsAsync(claudePath)) { + const validation = await this.validateClaudeAsync(claudePath); + const result = buildClaudeDetectionResult(claudePath, validation, 'system-path', 'Using Claude CLI'); + if (result) return result; + } + } + + // 6. Not found + return { + found: false, + source: 'fallback', + message: 'Claude CLI not found. Install from https://claude.ai/download', + }; + } + + /** + * Detect Python asynchronously (non-blocking) + * + * Same detection logic as detectPython but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectPythonAsync(): Promise { + const MINIMUM_VERSION = '3.10.0'; + + // 1. 
User configuration + if (this.userConfig.pythonPath) { + if (isWrongPlatformPath(this.userConfig.pythonPath)) { + console.warn( + `[Python] User-configured path is from different platform, ignoring: ${this.userConfig.pythonPath}` + ); + } else { + const validation = await this.validatePythonAsync(this.userConfig.pythonPath); + if (validation.valid) { + return { + found: true, + path: this.userConfig.pythonPath, + version: validation.version, + source: 'user-config', + message: `Using user-configured Python: ${this.userConfig.pythonPath}`, + }; + } + console.warn(`[Python] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Bundled Python (packaged apps only) + if (app.isPackaged) { + const bundledPath = this.getBundledPythonPath(); + if (bundledPath) { + const validation = await this.validatePythonAsync(bundledPath); + if (validation.valid) { + return { + found: true, + path: bundledPath, + version: validation.version, + source: 'bundled', + message: `Using bundled Python: ${bundledPath}`, + }; + } + } + } + + // 3. Homebrew Python (macOS) - simplified async version + if (process.platform === 'darwin') { + const homebrewPaths = [ + '/opt/homebrew/bin/python3', + '/opt/homebrew/bin/python3.12', + '/opt/homebrew/bin/python3.11', + '/opt/homebrew/bin/python3.10', + '/usr/local/bin/python3', + ]; + for (const pythonPath of homebrewPaths) { + if (await existsAsync(pythonPath)) { + const validation = await this.validatePythonAsync(pythonPath); + if (validation.valid) { + return { + found: true, + path: pythonPath, + version: validation.version, + source: 'homebrew', + message: `Using Homebrew Python: ${pythonPath}`, + }; + } + } + } + } + + // 4. System PATH (augmented) + const candidates = + process.platform === 'win32' + ? ['py -3', 'python', 'python3', 'py'] + : ['python3', 'python']; + + for (const cmd of candidates) { + if (cmd.startsWith('py ')) { + const validation = await this.validatePythonAsync(cmd); + if (validation.valid) { + return { + found: true, + path: cmd, + version: validation.version, + source: 'system-path', + message: `Using system Python: ${cmd}`, + }; + } + } else { + const pythonPath = await findExecutableAsync(cmd); + if (pythonPath) { + const validation = await this.validatePythonAsync(pythonPath); + if (validation.valid) { + return { + found: true, + path: pythonPath, + version: validation.version, + source: 'system-path', + message: `Using system Python: ${pythonPath}`, + }; + } + } + } + } + + // 5. Not found + return { + found: false, + source: 'fallback', + message: + `Python ${MINIMUM_VERSION}+ not found. ` + + 'Please install Python or configure in Settings.', + }; + } + + /** + * Detect Git asynchronously (non-blocking) + * + * Same detection logic as detectGit but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectGitAsync(): Promise { + // 1. User configuration + if (this.userConfig.gitPath) { + if (isWrongPlatformPath(this.userConfig.gitPath)) { + console.warn( + `[Git] User-configured path is from different platform, ignoring: ${this.userConfig.gitPath}` + ); + } else { + const validation = await this.validateGitAsync(this.userConfig.gitPath); + if (validation.valid) { + return { + found: true, + path: this.userConfig.gitPath, + version: validation.version, + source: 'user-config', + message: `Using user-configured Git: ${this.userConfig.gitPath}`, + }; + } + console.warn(`[Git] User-configured path invalid: ${validation.message}`); + } + } + + // 2. 
Homebrew (macOS) + if (process.platform === 'darwin') { + const homebrewPaths = [ + '/opt/homebrew/bin/git', + '/usr/local/bin/git', + ]; + + for (const gitPath of homebrewPaths) { + if (await existsAsync(gitPath)) { + const validation = await this.validateGitAsync(gitPath); + if (validation.valid) { + return { + found: true, + path: gitPath, + version: validation.version, + source: 'homebrew', + message: `Using Homebrew Git: ${gitPath}`, + }; + } + } + } + } + + // 3. System PATH (augmented) + const gitPath = await findExecutableAsync('git'); + if (gitPath) { + const validation = await this.validateGitAsync(gitPath); + if (validation.valid) { + return { + found: true, + path: gitPath, + version: validation.version, + source: 'system-path', + message: `Using system Git: ${gitPath}`, + }; + } + } + + // 4. Windows-specific detection (async to avoid blocking main process) + if (process.platform === 'win32') { + const whereGitPath = await findWindowsExecutableViaWhereAsync('git', '[Git]'); + if (whereGitPath) { + const validation = await this.validateGitAsync(whereGitPath); + if (validation.valid) { + return { + found: true, + path: whereGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${whereGitPath}`, + }; + } + } + + const windowsPaths = await getWindowsExecutablePathsAsync(WINDOWS_GIT_PATHS, '[Git]'); + for (const winGitPath of windowsPaths) { + const validation = await this.validateGitAsync(winGitPath); + if (validation.valid) { + return { + found: true, + path: winGitPath, + version: validation.version, + source: 'system-path', + message: `Using Windows Git: ${winGitPath}`, + }; + } + } + } + + // 5. Not found + return { + found: false, + source: 'fallback', + message: 'Git not found in standard locations. Using fallback "git".', + }; + } + + /** + * Detect GitHub CLI asynchronously (non-blocking) + * + * Same detection logic as detectGitHubCLI but uses async validation. + * + * @returns Promise resolving to detection result + */ + private async detectGitHubCLIAsync(): Promise { + // 1. User configuration + if (this.userConfig.githubCLIPath) { + if (isWrongPlatformPath(this.userConfig.githubCLIPath)) { + console.warn( + `[GitHub CLI] User-configured path is from different platform, ignoring: ${this.userConfig.githubCLIPath}` + ); + } else { + const validation = await this.validateGitHubCLIAsync(this.userConfig.githubCLIPath); + if (validation.valid) { + return { + found: true, + path: this.userConfig.githubCLIPath, + version: validation.version, + source: 'user-config', + message: `Using user-configured GitHub CLI: ${this.userConfig.githubCLIPath}`, + }; + } + console.warn(`[GitHub CLI] User-configured path invalid: ${validation.message}`); + } + } + + // 2. Homebrew (macOS) + if (process.platform === 'darwin') { + const homebrewPaths = [ + '/opt/homebrew/bin/gh', + '/usr/local/bin/gh', + ]; + + for (const ghPath of homebrewPaths) { + if (await existsAsync(ghPath)) { + const validation = await this.validateGitHubCLIAsync(ghPath); + if (validation.valid) { + return { + found: true, + path: ghPath, + version: validation.version, + source: 'homebrew', + message: `Using Homebrew GitHub CLI: ${ghPath}`, + }; + } + } + } + } + + // 3. 
System PATH (augmented) + const ghPath = await findExecutableAsync('gh'); + if (ghPath) { + const validation = await this.validateGitHubCLIAsync(ghPath); + if (validation.valid) { + return { + found: true, + path: ghPath, + version: validation.version, + source: 'system-path', + message: `Using system GitHub CLI: ${ghPath}`, + }; + } + } + + // 4. Windows Program Files + if (process.platform === 'win32') { + const windowsPaths = [ + 'C:\\Program Files\\GitHub CLI\\gh.exe', + 'C:\\Program Files (x86)\\GitHub CLI\\gh.exe', + ]; + + for (const winGhPath of windowsPaths) { + if (await existsAsync(winGhPath)) { + const validation = await this.validateGitHubCLIAsync(winGhPath); + if (validation.valid) { + return { + found: true, + path: winGhPath, + version: validation.version, + source: 'system-path', + message: `Using Windows GitHub CLI: ${winGhPath}`, + }; + } + } + } + } + + // 5. Not found + return { + found: false, + source: 'fallback', + message: 'GitHub CLI (gh) not found. Install from https://cli.github.com', + }; + } + + /** + * Get bundled Python path for packaged apps + * + * Only available in packaged Electron apps where Python is bundled + * in the resources directory. + * + * @returns Path to bundled Python or null if not found + */ + private getBundledPythonPath(): string | null { + if (!app.isPackaged) { + return null; + } + + const resourcesPath = process.resourcesPath; + const isWindows = process.platform === 'win32'; + + const pythonPath = isWindows + ? path.join(resourcesPath, 'python', 'python.exe') + : path.join(resourcesPath, 'python', 'bin', 'python3'); + + return existsSync(pythonPath) ? pythonPath : null; + } + + /** + * Find Homebrew Python on macOS + * Delegates to shared utility function. + * + * @returns Path to Homebrew Python or null if not found + */ + private findHomebrewPython(): string | null { + return findHomebrewPythonUtil( + (pythonPath) => this.validatePython(pythonPath), + '[CLI Tools]' + ); + } + + /** + * Clear cache manually + * + * Useful for testing or forcing re-detection. + * Normally not needed as cache is cleared automatically on settings change. + */ + clearCache(): void { + this.cache.clear(); + console.warn('[CLI Tools] Cache cleared'); + } + + /** + * Get tool detection info for diagnostics + * + * Performs fresh detection without using cache. + * Useful for Settings UI to show current detection status. + * + * @param tool - The tool to get detection info for + * @returns Detection result with full metadata + */ + getToolInfo(tool: CLITool): ToolDetectionResult { + return this.detectToolPath(tool); + } +} + +// Singleton instance +const cliToolManager = new CLIToolManager(); + +/** + * Get the path for a CLI tool + * + * Convenience function for accessing the tool manager singleton. + * Uses cached path if available, otherwise auto-detects. + * + * @param tool - The CLI tool to get the path for + * @returns The resolved path to the tool executable + * + * @example + * ```typescript + * import { getToolPath } from './cli-tool-manager'; + * + * const pythonPath = getToolPath('python'); + * const gitPath = getToolPath('git'); + * const ghPath = getToolPath('gh'); + * + * execSync(`${gitPath} status`, { cwd: projectPath }); + * ``` + */ +export function getToolPath(tool: CLITool): string { + return cliToolManager.getToolPath(tool); +} + +/** + * Configure CLI tools with user settings + * + * Call this when user updates CLI tool paths in Settings. + * Clears cache to force re-detection with new configuration. 
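Because `getToolPath()` stays synchronous, the intended pattern (see the async exports later in this diff) is to warm the cache once at startup so later synchronous lookups are cache hits rather than blocking detections. A hedged sketch of that interplay:

```typescript
// Sketch: preWarmToolCache and getToolPath are exported by this module
// (preWarmToolCache is defined further below in this diff).
import { app } from 'electron';
import { preWarmToolCache, getToolPath } from './cli-tool-manager';

app.whenReady().then(() => {
  // Fire-and-forget: detection runs in the background.
  void preWarmToolCache(['claude', 'git', 'gh', 'python']);
});

// Later, in a hot path, this resolves from the shared cache without spawning processes:
const gitPath = getToolPath('git');
```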
+ * + * @param config - User configuration for CLI tool paths + * + * @example + * ```typescript + * import { configureTools } from './cli-tool-manager'; + * + * // When settings are loaded or updated + * configureTools({ + * pythonPath: settings.pythonPath, + * gitPath: settings.gitPath, + * githubCLIPath: settings.githubCLIPath, + * }); + * ``` + */ +export function configureTools(config: ToolConfig): void { + cliToolManager.configure(config); } /** @@ -951,3 +1752,52 @@ export function clearToolCache(): void { export function isPathFromWrongPlatform(pathStr: string | undefined): boolean { return isWrongPlatformPath(pathStr); } + +// ============================================================================ +// ASYNC EXPORTS - Non-blocking alternatives for Electron main process +// ============================================================================ + +/** + * Get the path for a CLI tool asynchronously (non-blocking) + * + * Safe to call from Electron main process without blocking the event loop. + * Uses cached path if available, otherwise detects asynchronously. + * + * @param tool - The CLI tool to get the path for + * @returns Promise resolving to the tool path + * + * @example + * ```typescript + * import { getToolPathAsync } from './cli-tool-manager'; + * + * const claudePath = await getToolPathAsync('claude'); + * ``` + */ +export async function getToolPathAsync(tool: CLITool): Promise { + return cliToolManager.getToolPathAsync(tool); +} + +/** + * Pre-warm the CLI tool cache asynchronously + * + * Call this during app startup to detect tools in the background. + * Subsequent calls to getToolPath/getToolPathAsync will use cached values. + * + * @param tools - Array of tools to pre-warm (defaults to ['claude']) + * + * @example + * ```typescript + * import { preWarmToolCache } from './cli-tool-manager'; + * + * // In app startup + * app.whenReady().then(() => { + * // ... setup code ... + * preWarmToolCache(['claude', 'git', 'gh']); + * }); + * ``` + */ +export async function preWarmToolCache(tools: CLITool[] = ['claude']): Promise { + console.warn('[CLI Tools] Pre-warming cache for:', tools.join(', ')); + await Promise.all(tools.map(tool => cliToolManager.getToolPathAsync(tool))); + console.warn('[CLI Tools] Cache pre-warming complete'); +} diff --git a/apps/frontend/src/main/env-utils.ts b/apps/frontend/src/main/env-utils.ts index 9a1325ce15..01972d6af0 100644 --- a/apps/frontend/src/main/env-utils.ts +++ b/apps/frontend/src/main/env-utils.ts @@ -12,7 +12,32 @@ import * as os from 'os'; import * as path from 'path'; import * as fs from 'fs'; -import { execFileSync } from 'child_process'; +import { promises as fsPromises } from 'fs'; +import { execFileSync, execFile } from 'child_process'; +import { promisify } from 'util'; + +const execFileAsync = promisify(execFile); + +/** + * Check if a path exists asynchronously (non-blocking) + * + * Uses fs.promises.access which is non-blocking, unlike fs.existsSync. 
+ * + * @param filePath - The path to check + * @returns Promise resolving to true if path exists, false otherwise + */ +async function existsAsync(filePath: string): Promise { + try { + await fsPromises.access(filePath); + return true; + } catch { + return false; + } +} + +// Cache for npm global prefix to avoid repeated async calls +let npmGlobalPrefixCache: string | null | undefined = undefined; +let npmGlobalPrefixCachePromise: Promise | null = null; /** * Get npm global prefix directory dynamically @@ -30,10 +55,12 @@ function getNpmGlobalPrefix(): string | null { // On Windows, use npm.cmd for proper command resolution const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm'; - const rawPrefix = execFileSync(npmCommand, ['config', 'get', 'prefix'], { + // Use --location=global to bypass workspace context and avoid ENOWORKSPACES error + const rawPrefix = execFileSync(npmCommand, ['config', 'get', 'prefix', '--location=global'], { encoding: 'utf-8', timeout: 3000, windowsHide: true, + cwd: os.homedir(), // Run from home dir to avoid ENOWORKSPACES error in monorepos shell: process.platform === 'win32', // Enable shell on Windows for .cmd resolution }).trim(); @@ -60,19 +87,22 @@ function getNpmGlobalPrefix(): string | null { * Common binary directories that should be in PATH * These are locations where commonly used tools are installed */ -const COMMON_BIN_PATHS: Record = { +export const COMMON_BIN_PATHS: Record = { darwin: [ '/opt/homebrew/bin', // Apple Silicon Homebrew '/usr/local/bin', // Intel Homebrew / system + '/usr/local/share/dotnet', // .NET SDK '/opt/homebrew/sbin', // Apple Silicon Homebrew sbin '/usr/local/sbin', // Intel Homebrew sbin '~/.local/bin', // User-local binaries (Claude CLI) + '~/.dotnet/tools', // .NET global tools ], linux: [ '/usr/local/bin', '/usr/bin', // System binaries (Python, etc.) '/snap/bin', // Snap packages '~/.local/bin', // User-local binaries + '~/.dotnet/tools', // .NET global tools '/usr/sbin', // System admin binaries ], win32: [ @@ -82,6 +112,77 @@ const COMMON_BIN_PATHS: Record = { ], }; +/** + * Essential system directories that must always be in PATH + * Required for core system functionality (e.g., /usr/bin/security for Keychain access) + */ +const ESSENTIAL_SYSTEM_PATHS: string[] = ['/usr/bin', '/bin', '/usr/sbin', '/sbin']; + +/** + * Get expanded platform paths for PATH augmentation + * + * Shared helper used by both sync and async getAugmentedEnv functions. + * Expands home directory (~) in paths and returns the list of candidate paths. + * + * @param additionalPaths - Optional additional paths to include + * @returns Array of expanded paths (without existence checking) + */ +function getExpandedPlatformPaths(additionalPaths?: string[]): string[] { + const platform = process.platform as 'darwin' | 'linux' | 'win32'; + const homeDir = os.homedir(); + + // Get platform-specific paths and expand home directory + const platformPaths = COMMON_BIN_PATHS[platform] || []; + const expandedPaths = platformPaths.map(p => + p.startsWith('~') ? p.replace('~', homeDir) : p + ); + + // Add user-requested additional paths (expanded) + if (additionalPaths) { + for (const p of additionalPaths) { + const expanded = p.startsWith('~') ? p.replace('~', homeDir) : p; + expandedPaths.push(expanded); + } + } + + return expandedPaths; +} + +/** + * Build augmented PATH by filtering existing paths + * + * Shared helper that takes candidate paths and a set of current PATH entries, + * returning only paths that should be added. 
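`getExpandedPlatformPaths()` only expands `~` and appends the extra candidates; existence filtering happens later. An illustrative call on macOS, assuming the home directory is `/Users/me`:

```typescript
// Illustrative only: output order follows COMMON_BIN_PATHS.darwin above.
const candidates = getExpandedPlatformPaths(['~/bin']);
// [
//   '/opt/homebrew/bin',
//   '/usr/local/bin',
//   '/usr/local/share/dotnet',
//   '/opt/homebrew/sbin',
//   '/usr/local/sbin',
//   '/Users/me/.local/bin',
//   '/Users/me/.dotnet/tools',
//   '/Users/me/bin',
// ]
```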
+ * + * @param candidatePaths - Array of paths to consider adding + * @param currentPathSet - Set of paths already in PATH + * @param existingPaths - Array of paths that actually exist on the filesystem + * @param npmPrefix - npm global prefix path (or null if not found) + * @returns Array of paths to prepend to PATH + */ +function buildPathsToAdd( + candidatePaths: string[], + currentPathSet: Set, + existingPaths: Set, + npmPrefix: string | null +): string[] { + const pathsToAdd: string[] = []; + + // Add platform-specific paths that exist + for (const p of candidatePaths) { + if (!currentPathSet.has(p) && existingPaths.has(p)) { + pathsToAdd.push(p); + } + } + + // Add npm global prefix if it exists + if (npmPrefix && !currentPathSet.has(npmPrefix) && existingPaths.has(npmPrefix)) { + pathsToAdd.push(npmPrefix); + } + + return pathsToAdd; +} + /** * Get augmented environment with additional PATH entries * @@ -97,48 +198,44 @@ export function getAugmentedEnv(additionalPaths?: string[]): Record - p.startsWith('~') ? p.replace('~', homeDir) : p - ); + // Get all candidate paths (platform + additional) + const candidatePaths = getExpandedPlatformPaths(additionalPaths); - // Collect paths to add (only if they exist and aren't already in PATH) - const currentPath = env.PATH || ''; - const currentPathSet = new Set(currentPath.split(pathSeparator)); + // Ensure PATH has essential system directories when launched from Finder/Dock. + // When Electron launches from GUI (not terminal), PATH might be empty or minimal. + // The Claude Agent SDK needs /usr/bin/security to access macOS Keychain. + let currentPath = env.PATH || ''; - const pathsToAdd: string[] = []; + // On macOS/Linux, ensure basic system paths are always present + if (platform !== 'win32') { + const pathSetForEssentials = new Set(currentPath.split(pathSeparator).filter(Boolean)); + const missingEssentials = ESSENTIAL_SYSTEM_PATHS.filter(p => !pathSetForEssentials.has(p)); - // Add platform-specific paths - for (const p of expandedPaths) { - if (!currentPathSet.has(p) && fs.existsSync(p)) { - pathsToAdd.push(p); + if (missingEssentials.length > 0) { + // Append essential paths if missing (append, not prepend, to respect user's PATH) + currentPath = currentPath + ? `${currentPath}${pathSeparator}${missingEssentials.join(pathSeparator)}` + : missingEssentials.join(pathSeparator); } } - // Add npm global prefix dynamically (cross-platform: works with standard npm, nvm, nvm-windows) + // Collect paths to add (only if they exist and aren't already in PATH) + const currentPathSet = new Set(currentPath.split(pathSeparator).filter(Boolean)); + + // Check existence synchronously and build existing paths set + const existingPaths = new Set(candidatePaths.filter(p => fs.existsSync(p))); + + // Get npm global prefix dynamically const npmPrefix = getNpmGlobalPrefix(); - if (npmPrefix && !currentPathSet.has(npmPrefix) && fs.existsSync(npmPrefix)) { - pathsToAdd.push(npmPrefix); + if (npmPrefix && fs.existsSync(npmPrefix)) { + existingPaths.add(npmPrefix); } - // Add user-requested additional paths - if (additionalPaths) { - for (const p of additionalPaths) { - const expanded = p.startsWith('~') ? 
p.replace('~', homeDir) : p; - if (!currentPathSet.has(expanded) && fs.existsSync(expanded)) { - pathsToAdd.push(expanded); - } - } - } + // Build final paths to add using shared helper + const pathsToAdd = buildPathsToAdd(candidatePaths, currentPathSet, existingPaths, npmPrefix); // Prepend new paths to PATH (prepend so they take priority) - if (pathsToAdd.length > 0) { - env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator); - } + env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator); return env; } @@ -184,3 +281,160 @@ export function findExecutable(command: string): string | null { export function isCommandAvailable(command: string): boolean { return findExecutable(command) !== null; } + +// ============================================================================ +// ASYNC VERSIONS - Non-blocking alternatives for Electron main process +// ============================================================================ + +/** + * Get npm global prefix directory asynchronously (non-blocking) + * + * Uses caching to avoid repeated subprocess calls. Safe to call from + * Electron main process without blocking the event loop. + * + * @returns Promise resolving to npm global binaries directory, or null + */ +async function getNpmGlobalPrefixAsync(): Promise { + // Return cached value if available + if (npmGlobalPrefixCache !== undefined) { + return npmGlobalPrefixCache; + } + + // If a fetch is already in progress, wait for it + if (npmGlobalPrefixCachePromise) { + return npmGlobalPrefixCachePromise; + } + + // Start the async fetch + npmGlobalPrefixCachePromise = (async () => { + try { + const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm'; + + const { stdout } = await execFileAsync(npmCommand, ['config', 'get', 'prefix', '--location=global'], { + encoding: 'utf-8', + timeout: 3000, + windowsHide: true, + cwd: os.homedir(), // Run from home dir to avoid ENOWORKSPACES error in monorepos + shell: process.platform === 'win32', + }); + + const rawPrefix = stdout.trim(); + if (!rawPrefix) { + npmGlobalPrefixCache = null; + return null; + } + + const binPath = process.platform === 'win32' + ? rawPrefix + : path.join(rawPrefix, 'bin'); + + const normalizedPath = path.normalize(binPath); + npmGlobalPrefixCache = await existsAsync(normalizedPath) ? normalizedPath : null; + return npmGlobalPrefixCache; + } catch (error) { + console.warn(`[env-utils] Failed to get npm global prefix: ${error}`); + npmGlobalPrefixCache = null; + return null; + } finally { + npmGlobalPrefixCachePromise = null; + } + })(); + + return npmGlobalPrefixCachePromise; +} + +/** + * Get augmented environment asynchronously (non-blocking) + * + * Same as getAugmentedEnv but uses async npm prefix detection. + * Safe to call from Electron main process without blocking. + * + * @param additionalPaths - Optional array of additional paths to include + * @returns Promise resolving to environment object with augmented PATH + */ +export async function getAugmentedEnvAsync(additionalPaths?: string[]): Promise> { + const env = { ...process.env } as Record; + const platform = process.platform as 'darwin' | 'linux' | 'win32'; + const pathSeparator = platform === 'win32' ? 
';' : ':'; + + // Get all candidate paths (platform + additional) + const candidatePaths = getExpandedPlatformPaths(additionalPaths); + + // Ensure essential system paths are present (for macOS Keychain access) + let currentPath = env.PATH || ''; + + if (platform !== 'win32') { + const pathSetForEssentials = new Set(currentPath.split(pathSeparator).filter(Boolean)); + const missingEssentials = ESSENTIAL_SYSTEM_PATHS.filter(p => !pathSetForEssentials.has(p)); + + if (missingEssentials.length > 0) { + currentPath = currentPath + ? `${currentPath}${pathSeparator}${missingEssentials.join(pathSeparator)}` + : missingEssentials.join(pathSeparator); + } + } + + // Collect paths to add (only if they exist and aren't already in PATH) + const currentPathSet = new Set(currentPath.split(pathSeparator).filter(Boolean)); + + // Check existence asynchronously in parallel for performance + const pathChecks = await Promise.all( + candidatePaths.map(async (p) => ({ path: p, exists: await existsAsync(p) })) + ); + const existingPaths = new Set( + pathChecks.filter(({ exists }) => exists).map(({ path: p }) => p) + ); + + // Get npm global prefix dynamically (async - non-blocking) + const npmPrefix = await getNpmGlobalPrefixAsync(); + if (npmPrefix && await existsAsync(npmPrefix)) { + existingPaths.add(npmPrefix); + } + + // Build final paths to add using shared helper + const pathsToAdd = buildPathsToAdd(candidatePaths, currentPathSet, existingPaths, npmPrefix); + + // Prepend new paths to PATH (prepend so they take priority) + env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator); + + return env; +} + +/** + * Find the full path to an executable asynchronously (non-blocking) + * + * Same as findExecutable but uses async environment augmentation. + * + * @param command - The command name to find (e.g., 'gh', 'git') + * @returns Promise resolving to the full path to the executable, or null + */ +export async function findExecutableAsync(command: string): Promise { + const env = await getAugmentedEnvAsync(); + const pathSeparator = process.platform === 'win32' ? ';' : ':'; + const pathDirs = (env.PATH || '').split(pathSeparator); + + const extensions = process.platform === 'win32' + ? ['.exe', '.cmd', '.bat', '.ps1', ''] + : ['']; + + for (const dir of pathDirs) { + for (const ext of extensions) { + const fullPath = path.join(dir, command + ext); + if (await existsAsync(fullPath)) { + return fullPath; + } + } + } + + return null; +} + +/** + * Clear the npm global prefix cache + * + * Call this if npm configuration changes and you need fresh detection. 
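`findExecutableAsync()` walks the augmented PATH with per-platform extensions, so callers can resolve a tool without blocking the event loop. A minimal usage sketch; the fallback to the bare command name is the caller's choice, mirroring what the CLI tool manager does:

```typescript
// Sketch: findExecutableAsync is exported by this module; locateGh is illustrative.
import { findExecutableAsync } from './env-utils';

async function locateGh(): Promise<string> {
  const ghPath = await findExecutableAsync('gh');
  return ghPath ?? 'gh'; // fall back to the bare name and let PATH resolve it
}
```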
+ */ +export function clearNpmPrefixCache(): void { + npmGlobalPrefixCache = undefined; + npmGlobalPrefixCachePromise = null; +} diff --git a/apps/frontend/src/main/index.ts b/apps/frontend/src/main/index.ts index 7cd856a0fe..8ee2eaf76c 100644 --- a/apps/frontend/src/main/index.ts +++ b/apps/frontend/src/main/index.ts @@ -1,6 +1,28 @@ -import { app, BrowserWindow, shell, nativeImage } from 'electron'; +// Load .env file FIRST before any other imports that might use process.env +import { config } from 'dotenv'; +import { resolve, dirname } from 'path'; +import { existsSync } from 'fs'; + +// Load .env from apps/frontend directory +// In development: __dirname is out/main (compiled), so go up 2 levels +// In production: app resources directory +const possibleEnvPaths = [ + resolve(__dirname, '../../.env'), // Development: out/main -> apps/frontend/.env + resolve(__dirname, '../../../.env'), // Alternative: might be in different location + resolve(process.cwd(), 'apps/frontend/.env'), // Fallback: from workspace root +]; + +for (const envPath of possibleEnvPaths) { + if (existsSync(envPath)) { + config({ path: envPath }); + console.log(`[dotenv] Loaded environment from: ${envPath}`); + break; + } +} + +import { app, BrowserWindow, shell, nativeImage, session, screen } from 'electron'; import { join } from 'path'; -import { accessSync, readFileSync, writeFileSync } from 'fs'; +import { accessSync, readFileSync, writeFileSync, rmSync } from 'fs'; import { electronApp, optimizer, is } from '@electron-toolkit/utils'; import { setupIpcHandlers } from './ipc-setup'; import { AgentManager } from './agent'; @@ -12,11 +34,34 @@ import { initializeAppUpdater } from './app-updater'; import { DEFAULT_APP_SETTINGS } from '../shared/constants'; import { readSettingsFile } from './settings-utils'; import { setupErrorLogging } from './app-logger'; +import { initSentryMain } from './sentry'; +import { preWarmToolCache } from './cli-tool-manager'; +import { initializeClaudeProfileManager } from './claude-profile-manager'; import type { AppSettings } from '../shared/types'; +// โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ +// Window sizing constants +// โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ +/** Preferred window width on startup */ +const WINDOW_PREFERRED_WIDTH: number = 1400; +/** Preferred window height on startup */ +const WINDOW_PREFERRED_HEIGHT: number = 900; +/** Absolute minimum window width (supports high DPI displays with scaling) */ +const WINDOW_MIN_WIDTH: number = 800; +/** Absolute minimum window height (supports high DPI displays with scaling) */ +const WINDOW_MIN_HEIGHT: number = 500; +/** Margin from screen edges to avoid edge-to-edge windows */ +const WINDOW_SCREEN_MARGIN: number = 20; +/** Default screen dimensions used as fallback when screen.getPrimaryDisplay() fails */ +const DEFAULT_SCREEN_WIDTH: number = 1920; +const DEFAULT_SCREEN_HEIGHT: number = 1080; + // Setup error logging early (captures uncaught exceptions) setupErrorLogging(); +// Initialize Sentry for error tracking (respects user's sentryEnabled setting) +initSentryMain(); + /** * Load app settings synchronously (for use during startup). 
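The `createWindow()` changes further below clamp the preferred window size to the display's work area and relax the minimums on small or heavily scaled screens. A standalone sketch of that clamping, using the constants defined above:

```typescript
// Standalone sketch of the sizing logic used in createWindow() below.
function computeWindowSize(workArea: { width: number; height: number }) {
  const availableWidth = workArea.width - WINDOW_SCREEN_MARGIN;
  const availableHeight = workArea.height - WINDOW_SCREEN_MARGIN;
  const width = Math.min(WINDOW_PREFERRED_WIDTH, availableWidth);
  const height = Math.min(WINDOW_PREFERRED_HEIGHT, availableHeight);
  return {
    width,
    height,
    // Minimums never exceed the initial size, so small displays stay usable.
    minWidth: Math.min(WINDOW_MIN_WIDTH, width),
    minHeight: Math.min(WINDOW_MIN_HEIGHT, height),
  };
}

// e.g. a 1280x720 work area -> 1260x700 window with 800x500 minimums.
```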
* This is a simple merge with defaults - no migrations or auto-detection. @@ -26,6 +71,32 @@ function loadSettingsSync(): AppSettings { return { ...DEFAULT_APP_SETTINGS, ...savedSettings } as AppSettings; } +/** + * Clean up stale update metadata files from the redundant source updater system. + * + * The old "source updater" wrote .update-metadata.json files that could persist + * across app updates and cause version display desync. This cleanup ensures + * we use the actual bundled version from app.getVersion(). + */ +function cleanupStaleUpdateMetadata(): void { + const userData = app.getPath('userData'); + const stalePaths = [ + join(userData, 'auto-claude-source'), + join(userData, 'backend-source'), + ]; + + for (const stalePath of stalePaths) { + if (existsSync(stalePath)) { + try { + rmSync(stalePath, { recursive: true, force: true }); + console.warn(`[main] Cleaned up stale update metadata: ${stalePath}`); + } catch (e) { + console.warn(`[main] Failed to clean up stale metadata at ${stalePath}:`, e); + } + } + } +} + // Get icon path based on platform function getIconPath(): string { // In dev mode, __dirname is out/main, so we go up to project root then into resources @@ -54,12 +125,51 @@ let agentManager: AgentManager | null = null; let terminalManager: TerminalManager | null = null; function createWindow(): void { + // Get the primary display's work area (accounts for taskbar, dock, etc.) + // Wrapped in try/catch to handle potential failures with fallback to safe defaults + let workAreaSize: { width: number; height: number }; + try { + const display = screen.getPrimaryDisplay(); + // Validate the returned object has expected structure with valid dimensions + if ( + display && + display.workAreaSize && + typeof display.workAreaSize.width === 'number' && + typeof display.workAreaSize.height === 'number' && + display.workAreaSize.width > 0 && + display.workAreaSize.height > 0 + ) { + workAreaSize = display.workAreaSize; + } else { + console.error( + '[main] screen.getPrimaryDisplay() returned unexpected structure:', + JSON.stringify(display) + ); + workAreaSize = { width: DEFAULT_SCREEN_WIDTH, height: DEFAULT_SCREEN_HEIGHT }; + } + } catch (error: unknown) { + console.error('[main] Failed to get primary display, using fallback dimensions:', error); + workAreaSize = { width: DEFAULT_SCREEN_WIDTH, height: DEFAULT_SCREEN_HEIGHT }; + } + + // Calculate available space with a small margin to avoid edge-to-edge windows + const availableWidth: number = workAreaSize.width - WINDOW_SCREEN_MARGIN; + const availableHeight: number = workAreaSize.height - WINDOW_SCREEN_MARGIN; + + // Calculate actual dimensions (preferred, but capped to margin-adjusted available space) + const width: number = Math.min(WINDOW_PREFERRED_WIDTH, availableWidth); + const height: number = Math.min(WINDOW_PREFERRED_HEIGHT, availableHeight); + + // Ensure minimum dimensions don't exceed the actual initial window size + const minWidth: number = Math.min(WINDOW_MIN_WIDTH, width); + const minHeight: number = Math.min(WINDOW_MIN_HEIGHT, height); + // Create the browser window mainWindow = new BrowserWindow({ - width: 1400, - height: 900, - minWidth: 1000, - minHeight: 700, + width, + height, + minWidth, + minHeight, show: false, autoHideMenuBar: true, titleBarStyle: 'hiddenInset', @@ -110,11 +220,29 @@ if (process.platform === 'darwin') { app.name = 'Auto Claude'; } +// Fix Windows GPU cache permission errors (0x5 Access Denied) +if (process.platform === 'win32') { + 
app.commandLine.appendSwitch('disable-gpu-shader-disk-cache'); + app.commandLine.appendSwitch('disable-gpu-program-cache'); + console.log('[main] Applied Windows GPU cache fixes'); +} + // Initialize the application app.whenReady().then(() => { // Set app user model id for Windows electronApp.setAppUserModelId('com.autoclaude.ui'); + // Clear cache on Windows to prevent permission errors from stale cache + if (process.platform === 'win32') { + session.defaultSession.clearCache() + .then(() => console.log('[main] Cleared cache on startup')) + .catch((err) => console.warn('[main] Failed to clear cache:', err)); + } + + // Clean up stale update metadata from the old source updater system + // This prevents version display desync after electron-updater installs a new version + cleanupStaleUpdateMetadata(); + // Set dock icon on macOS if (process.platform === 'darwin') { const iconPath = getIconPath(); @@ -222,6 +350,23 @@ app.whenReady().then(() => { // Create window createWindow(); + // Pre-warm CLI tool cache in background (non-blocking) + // This ensures CLI detection is done before user needs it + // Include all commonly used tools to prevent sync blocking on first use + setImmediate(() => { + preWarmToolCache(['claude', 'git', 'gh', 'python']).catch((error) => { + console.warn('[main] Failed to pre-warm CLI cache:', error); + }); + }); + + // Pre-initialize Claude profile manager in background (non-blocking) + // This ensures profile data is loaded before user clicks "Start Claude Code" + setImmediate(() => { + initializeClaudeProfileManager().catch((error) => { + console.warn('[main] Failed to pre-initialize profile manager:', error); + }); + }); + // Initialize usage monitoring after window is created if (mainWindow) { // Setup event forwarding from usage monitor to renderer diff --git a/apps/frontend/src/main/insights/config.ts b/apps/frontend/src/main/insights/config.ts index 0ca1609c13..97e8a9a28d 100644 --- a/apps/frontend/src/main/insights/config.ts +++ b/apps/frontend/src/main/insights/config.ts @@ -1,9 +1,12 @@ import path from 'path'; import { existsSync, readFileSync } from 'fs'; -import { app } from 'electron'; import { getProfileEnv } from '../rate-limit-detector'; +import { getAPIProfileEnv } from '../services/profile'; +import { getOAuthModeClearVars } from '../agent/env-utils'; +import { pythonEnvManager, getConfiguredPythonPath } from '../python-env-manager'; import { getValidatedPythonPath } from '../python-detector'; -import { getConfiguredPythonPath } from '../python-env-manager'; +import { getAugmentedEnv } from '../env-utils'; +import { getEffectiveSourcePath } from '../updater/path-resolver'; /** * Configuration manager for insights service @@ -40,24 +43,23 @@ export class InsightsConfig { /** * Get the auto-claude source path (detects automatically if not configured) + * Uses getEffectiveSourcePath() which handles userData override for user-updated backend */ getAutoBuildSourcePath(): string | null { if (this.autoBuildSourcePath && existsSync(this.autoBuildSourcePath)) { return this.autoBuildSourcePath; } - const possiblePaths = [ - // Apps structure: from out/main -> apps/backend - path.resolve(__dirname, '..', '..', '..', 'backend'), - path.resolve(app.getAppPath(), '..', 'backend'), - path.resolve(process.cwd(), 'apps', 'backend') - ]; - - for (const p of possiblePaths) { - if (existsSync(p) && existsSync(path.join(p, 'runners', 'spec_runner.py'))) { - return p; - } + // Use shared path resolver which handles: + // 1. User settings (autoBuildPath) + // 2. 
userData override (backend-source) for user-updated backend + // 3. Bundled backend (process.resourcesPath/backend) + // 4. Development paths + const effectivePath = getEffectiveSourcePath(); + if (existsSync(effectivePath) && existsSync(path.join(effectivePath, 'runners', 'spec_runner.py'))) { + return effectivePath; } + return null; } @@ -104,17 +106,51 @@ export class InsightsConfig { * Get complete environment for process execution * Includes system env, auto-claude env, and active Claude profile */ - getProcessEnv(): Record { + async getProcessEnv(): Promise> { const autoBuildEnv = this.loadAutoBuildEnv(); const profileEnv = getProfileEnv(); + const apiProfileEnv = await getAPIProfileEnv(); + const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv); + const pythonEnv = pythonEnvManager.getPythonEnv(); + const autoBuildSource = this.getAutoBuildSourcePath(); + const pythonPathParts = (pythonEnv.PYTHONPATH ?? '') + .split(path.delimiter) + .map((entry) => entry.trim()) + .filter(Boolean) + .map((entry) => path.resolve(entry)); + + if (autoBuildSource) { + const normalizedAutoBuildSource = path.resolve(autoBuildSource); + const autoBuildComparator = process.platform === 'win32' + ? normalizedAutoBuildSource.toLowerCase() + : normalizedAutoBuildSource; + const hasAutoBuildSource = pythonPathParts.some((entry) => { + const candidate = process.platform === 'win32' ? entry.toLowerCase() : entry; + return candidate === autoBuildComparator; + }); + + if (!hasAutoBuildSource) { + pythonPathParts.push(normalizedAutoBuildSource); + } + } + + const combinedPythonPath = pythonPathParts.join(path.delimiter); + + // Use getAugmentedEnv() to ensure common tool paths (claude, dotnet, etc.) + // are available even when app is launched from Finder/Dock. + const augmentedEnv = getAugmentedEnv(); return { - ...process.env as Record, + ...augmentedEnv, + ...pythonEnv, // Include PYTHONPATH for bundled site-packages ...autoBuildEnv, + ...oauthModeClearVars, ...profileEnv, + ...apiProfileEnv, PYTHONUNBUFFERED: '1', PYTHONIOENCODING: 'utf-8', - PYTHONUTF8: '1' + PYTHONUTF8: '1', + ...(combinedPythonPath ? 
{ PYTHONPATH: combinedPythonPath } : {}) }; } } diff --git a/apps/frontend/src/main/insights/insights-executor.ts b/apps/frontend/src/main/insights/insights-executor.ts index d5565620fe..0c349b3480 100644 --- a/apps/frontend/src/main/insights/insights-executor.ts +++ b/apps/frontend/src/main/insights/insights-executor.ts @@ -85,7 +85,7 @@ export class InsightsExecutor extends EventEmitter { } as InsightsChatStatus); // Get process environment - const processEnv = this.config.getProcessEnv(); + const processEnv = await this.config.getProcessEnv(); // Write conversation history to temp file to avoid Windows command-line length limit const historyFile = path.join( @@ -130,6 +130,7 @@ export class InsightsExecutor extends EventEmitter { let suggestedTask: InsightsChatMessage['suggestedTask'] | undefined; const toolsUsed: InsightsToolUsage[] = []; let allInsightsOutput = ''; + let stderrOutput = ''; proc.stdout?.on('data', (data: Buffer) => { const text = data.toString(); @@ -159,8 +160,9 @@ export class InsightsExecutor extends EventEmitter { proc.stderr?.on('data', (data: Buffer) => { const text = data.toString(); - // Collect stderr for rate limit detection too + // Collect stderr for rate limit detection and error reporting allInsightsOutput = (allInsightsOutput + text).slice(-10000); + stderrOutput = (stderrOutput + text).slice(-2000); console.error('[Insights]', text); }); @@ -196,7 +198,11 @@ export class InsightsExecutor extends EventEmitter { toolsUsed }); } else { - const error = `Process exited with code ${code}`; + // Include stderr output in error message for debugging + const stderrSummary = stderrOutput.trim() + ? `\n\nError output:\n${stderrOutput.slice(-500)}` + : ''; + const error = `Process exited with code ${code}${stderrSummary}`; this.emit('stream-chunk', projectId, { type: 'error', error diff --git a/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts b/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts index cbe4a67b68..1c6b350b44 100644 --- a/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts @@ -1,6 +1,7 @@ import type { BrowserWindow } from 'electron'; import path from 'path'; -import { IPC_CHANNELS, getSpecsDir, AUTO_BUILD_PATHS } from '../../shared/constants'; +import { existsSync } from 'fs'; +import { IPC_CHANNELS, AUTO_BUILD_PATHS, getSpecsDir } from '../../shared/constants'; import type { SDKRateLimitInfo, Task, @@ -15,6 +16,8 @@ import { fileWatcher } from '../file-watcher'; import { projectStore } from '../project-store'; import { notificationService } from '../notification-service'; import { persistPlanStatusSync, getPlanPath } from './task/plan-file-utils'; +import { findTaskWorktree } from '../worktree-paths'; +import { findTaskAndProject } from './task/shared'; /** @@ -31,14 +34,18 @@ export function registerAgenteventsHandlers( agentManager.on('log', (taskId: string, log: string) => { const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send(IPC_CHANNELS.TASK_LOG, taskId, log); + // Include projectId for multi-project filtering (issue #723) + const { project } = findTaskAndProject(taskId); + mainWindow.webContents.send(IPC_CHANNELS.TASK_LOG, taskId, log, project?.id); } }); agentManager.on('error', (taskId: string, error: string) => { const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error); + // Include projectId for multi-project filtering (issue #723) + const { 
project } = findTaskAndProject(taskId); + mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error, project?.id); } }); @@ -61,11 +68,15 @@ export function registerAgenteventsHandlers( agentManager.on('exit', (taskId: string, code: number | null, processType: ProcessType) => { const mainWindow = getMainWindow(); if (mainWindow) { + // Get project info early for multi-project filtering (issue #723) + const { project: exitProject } = findTaskAndProject(taskId); + const exitProjectId = exitProject?.id; + // Send final plan state to renderer BEFORE unwatching // This ensures the renderer has the final subtask data (fixes 0/0 subtask bug) const finalPlan = fileWatcher.getCurrentPlan(taskId); if (finalPlan) { - mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, finalPlan); + mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, finalPlan, exitProjectId); } fileWatcher.unwatch(taskId); @@ -81,6 +92,12 @@ export function registerAgenteventsHandlers( try { const projects = projectStore.getProjects(); + // IMPORTANT: Invalidate cache for all projects to ensure we get fresh data + // This prevents race conditions where cached task data has stale status + for (const p of projects) { + projectStore.invalidateTasksCache(p.id); + } + for (const p of projects) { const tasks = projectStore.getTasks(p.id); task = tasks.find((t) => t.id === taskId || t.specId === taskId); @@ -92,42 +109,72 @@ export function registerAgenteventsHandlers( if (task && project) { const taskTitle = task.title || task.specId; - const planPath = getPlanPath(project, task); + const mainPlanPath = getPlanPath(project, task); + const projectId = project.id; // Capture for closure + + // Capture task values for closure + const taskSpecId = task.specId; + const projectPath = project.path; + const autoBuildPath = project.autoBuildPath; // Use shared utility for persisting status (prevents race conditions) + // Persist to both main project AND worktree (if exists) for consistency const persistStatus = (status: TaskStatus) => { - const persisted = persistPlanStatusSync(planPath, status); - if (persisted) { - console.log(`[Task ${taskId}] Persisted status to plan: ${status}`); + // Persist to main project + const mainPersisted = persistPlanStatusSync(mainPlanPath, status, projectId); + if (mainPersisted) { + console.warn(`[Task ${taskId}] Persisted status to main plan: ${status}`); + } + + // Also persist to worktree if it exists + const worktreePath = findTaskWorktree(projectPath, taskSpecId); + if (worktreePath) { + const specsBaseDir = getSpecsDir(autoBuildPath); + const worktreePlanPath = path.join( + worktreePath, + specsBaseDir, + taskSpecId, + AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN + ); + if (existsSync(worktreePlanPath)) { + const worktreePersisted = persistPlanStatusSync(worktreePlanPath, status, projectId); + if (worktreePersisted) { + console.warn(`[Task ${taskId}] Persisted status to worktree plan: ${status}`); + } + } } }; if (code === 0) { notificationService.notifyReviewNeeded(taskTitle, project.id, taskId); - + // Fallback: Ensure status is updated even if COMPLETE phase event was missed // This prevents tasks from getting stuck in ai_review status // Uses inverted logic to also handle tasks with no subtasks (treats them as complete) const isActiveStatus = task.status === 'in_progress' || task.status === 'ai_review'; - const hasIncompleteSubtasks = task.subtasks && task.subtasks.length > 0 && + const hasIncompleteSubtasks = task.subtasks && task.subtasks.length > 0 && task.subtasks.some((s) => 
s.status !== 'completed'); - + if (isActiveStatus && !hasIncompleteSubtasks) { - console.log(`[Task ${taskId}] Fallback: Moving to human_review (process exited successfully)`); + console.warn(`[Task ${taskId}] Fallback: Moving to human_review (process exited successfully)`); persistStatus('human_review'); + // Include projectId for multi-project filtering (issue #723) mainWindow.webContents.send( IPC_CHANNELS.TASK_STATUS_CHANGE, taskId, - 'human_review' as TaskStatus + 'human_review' as TaskStatus, + projectId ); } } else { notificationService.notifyTaskFailed(taskTitle, project.id, taskId); persistStatus('human_review'); + // Include projectId for multi-project filtering (issue #723) mainWindow.webContents.send( IPC_CHANNELS.TASK_STATUS_CHANGE, taskId, - 'human_review' as TaskStatus + 'human_review' as TaskStatus, + projectId ); } } @@ -140,7 +187,12 @@ export function registerAgenteventsHandlers( agentManager.on('execution-progress', (taskId: string, progress: ExecutionProgressData) => { const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send(IPC_CHANNELS.TASK_EXECUTION_PROGRESS, taskId, progress); + // Use shared helper to find task and project (issue #723 - deduplicate lookup) + const { task, project } = findTaskAndProject(taskId); + const taskProjectId = project?.id; + + // Include projectId in execution progress event for multi-project filtering + mainWindow.webContents.send(IPC_CHANNELS.TASK_EXECUTION_PROGRESS, taskId, progress, taskProjectId); const phaseToStatus: Record = { 'idle': null, @@ -154,30 +206,45 @@ export function registerAgenteventsHandlers( const newStatus = phaseToStatus[progress.phase]; if (newStatus) { + // Include projectId in status change event for multi-project filtering mainWindow.webContents.send( IPC_CHANNELS.TASK_STATUS_CHANGE, taskId, - newStatus + newStatus, + taskProjectId ); - // CRITICAL: Persist status to plan file to prevent flip-flop on task list refresh + // CRITICAL: Persist status to plan file(s) to prevent flip-flop on task list refresh // When getTasks() is called, it reads status from the plan file. Without persisting, // the status in the file might differ from the UI, causing inconsistent state. // Uses shared utility with locking to prevent race conditions. - try { - const projects = projectStore.getProjects(); - for (const p of projects) { - const tasks = projectStore.getTasks(p.id); - const task = tasks.find((t) => t.id === taskId || t.specId === taskId); - if (task) { - const planPath = getPlanPath(p, task); - persistPlanStatusSync(planPath, newStatus); - break; + // IMPORTANT: We persist to BOTH main project AND worktree (if exists) to ensure + // consistency, since getTasks() prefers the worktree version. 
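On the receiving side, the extra `projectId` argument lets a renderer view drop events that belong to other projects. A hypothetical listener — the channel string, the `window.electron` preload shape, and the `activeProjectId`/`updateTaskStatus` names are illustrative, not confirmed by this diff:

```typescript
// Hypothetical renderer-side filtering for issue #723; names are illustrative.
window.electron.ipcRenderer.on(
  'task:status-change',
  (_event, taskId: string, status: TaskStatus, projectId?: string) => {
    if (projectId && projectId !== activeProjectId) return; // another project's task
    updateTaskStatus(taskId, status);
  }
);
```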
+ if (task && project) { + try { + // Persist to main project plan file + const mainPlanPath = getPlanPath(project, task); + persistPlanStatusSync(mainPlanPath, newStatus, project.id); + + // Also persist to worktree plan file if it exists + // This ensures consistency since getTasks() prefers worktree version + const worktreePath = findTaskWorktree(project.path, task.specId); + if (worktreePath) { + const specsBaseDir = getSpecsDir(project.autoBuildPath); + const worktreePlanPath = path.join( + worktreePath, + specsBaseDir, + task.specId, + AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN + ); + if (existsSync(worktreePlanPath)) { + persistPlanStatusSync(worktreePlanPath, newStatus, project.id); + } } + } catch (err) { + // Ignore persistence errors - UI will still work, just might flip on refresh + console.warn('[execution-progress] Could not persist status:', err); } - } catch (err) { - // Ignore persistence errors - UI will still work, just might flip on refresh - console.warn('[execution-progress] Could not persist status:', err); } } } @@ -190,14 +257,18 @@ export function registerAgenteventsHandlers( fileWatcher.on('progress', (taskId: string, plan: ImplementationPlan) => { const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, plan); + // Use shared helper to find project (issue #723 - deduplicate lookup) + const { project } = findTaskAndProject(taskId); + mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, plan, project?.id); } }); fileWatcher.on('error', (taskId: string, error: string) => { const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error); + // Include projectId for multi-project filtering (issue #723) + const { project } = findTaskAndProject(taskId); + mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error, project?.id); } }); } diff --git a/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts b/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts index 1d0b963efc..66c7f3ee3d 100644 --- a/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts @@ -11,6 +11,7 @@ import type { IPCResult, AppUpdateInfo } from '../../shared/types'; import { checkForUpdates, downloadUpdate, + downloadStableVersion, quitAndInstall, getCurrentVersion } from '../app-updater'; @@ -65,6 +66,26 @@ export function registerAppUpdateHandlers(): void { } ); + /** + * APP_UPDATE_DOWNLOAD_STABLE: Download stable version (for downgrade from beta) + * Uses allowDowngrade to download an older stable version + */ + ipcMain.handle( + IPC_CHANNELS.APP_UPDATE_DOWNLOAD_STABLE, + async (): Promise => { + try { + await downloadStableVersion(); + return { success: true }; + } catch (error) { + console.error('[app-update-handlers] Download stable version failed:', error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to download stable version' + }; + } + } + ); + /** * APP_UPDATE_INSTALL: Quit and install update * Quits the app and installs the downloaded update diff --git a/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts b/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts deleted file mode 100644 index 4a4ab66d82..0000000000 --- a/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts +++ /dev/null @@ -1,321 +0,0 @@ -import { ipcMain } from 'electron'; -import type { BrowserWindow } from 'electron'; -import { IPC_CHANNELS } from '../../shared/constants'; -import type { IPCResult } from '../../shared/types'; -import path from 'path'; -import { existsSync, readFileSync, writeFileSync } from 'fs'; -import type { AutoBuildSourceUpdateProgress, SourceEnvConfig, SourceEnvCheckResult } from '../../shared/types'; -import { checkForUpdates as checkSourceUpdates, downloadAndApplyUpdate, getBundledVersion, getEffectiveVersion, getEffectiveSourcePath } from '../auto-claude-updater'; -import { debugLog } from '../../shared/utils/debug-logger'; - - -/** - * Register all autobuild-source-related IPC handlers - */ -export function registerAutobuildSourceHandlers( - getMainWindow: () => BrowserWindow | null -): void { - // ============================================ - // Auto Claude Source Update Operations - // ============================================ - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_CHECK, - async (): Promise> => { - console.log('[autobuild-source] Check for updates called'); - debugLog('[IPC] AUTOBUILD_SOURCE_CHECK called'); - try { - const result = await checkSourceUpdates(); - console.log('[autobuild-source] Check result:', JSON.stringify(result, null, 2)); - debugLog('[IPC] AUTOBUILD_SOURCE_CHECK result:', result); - return { success: true, data: result }; - } catch (error) { - console.error('[autobuild-source] Check error:', error); - debugLog('[IPC] AUTOBUILD_SOURCE_CHECK error:', error); - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to check for updates' - }; - } - } - ); - - ipcMain.on( - IPC_CHANNELS.AUTOBUILD_SOURCE_DOWNLOAD, - () => { - debugLog('[IPC] Autobuild source download requested'); - const mainWindow = getMainWindow(); - if (!mainWindow) { - debugLog('[IPC] No main window available, aborting update'); - return; - } - - // Start download in background - downloadAndApplyUpdate((progress) => { - debugLog('[IPC] Update progress:', progress.stage, progress.message); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - progress - ); - }).then((result) => { - if (result.success) { - debugLog('[IPC] Update completed successfully, version:', result.version); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'complete', - message: `Updated to version ${result.version}`, - newVersion: result.version // Include new version for UI refresh - } as AutoBuildSourceUpdateProgress - ); - } else { - debugLog('[IPC] Update failed:', result.error); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'error', - message: result.error || 'Update failed' - } as AutoBuildSourceUpdateProgress - ); - } - }).catch((error) => { - debugLog('[IPC] Update error:', error instanceof Error ? error.message : error); - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'error', - message: error instanceof Error ? 
error.message : 'Update failed' - } as AutoBuildSourceUpdateProgress - ); - }); - - // Send initial progress - mainWindow.webContents.send( - IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, - { - stage: 'checking', - message: 'Starting update...' - } as AutoBuildSourceUpdateProgress - ); - } - ); - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_VERSION, - async (): Promise> => { - try { - // Use effective version which accounts for source updates - const version = getEffectiveVersion(); - debugLog('[IPC] Returning effective version:', version); - return { success: true, data: version }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to get version' - }; - } - } - ); - - // ============================================ - // Auto Claude Source Environment Operations - // ============================================ - - /** - * Parse an .env file content into a key-value object - */ - const parseSourceEnvFile = (content: string): Record => { - const vars: Record = {}; - for (const line of content.split('\n')) { - const trimmed = line.trim(); - if (!trimmed || trimmed.startsWith('#')) continue; - - const eqIndex = trimmed.indexOf('='); - if (eqIndex > 0) { - const key = trimmed.substring(0, eqIndex).trim(); - let value = trimmed.substring(eqIndex + 1).trim(); - // Remove quotes if present - if ((value.startsWith('"') && value.endsWith('"')) || - (value.startsWith("'") && value.endsWith("'"))) { - value = value.slice(1, -1); - } - vars[key] = value; - } - } - return vars; - }; - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_GET, - async (): Promise> => { - try { - const sourcePath = getEffectiveSourcePath(); - if (!sourcePath) { - return { - success: true, - data: { - hasClaudeToken: false, - envExists: false, - sourcePath: undefined - } - }; - } - - const envPath = path.join(sourcePath, '.env'); - const envExists = existsSync(envPath); - - if (!envExists) { - return { - success: true, - data: { - hasClaudeToken: false, - envExists: false, - sourcePath - } - }; - } - - const content = readFileSync(envPath, 'utf-8'); - const vars = parseSourceEnvFile(content); - const hasToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN']; - - return { - success: true, - data: { - hasClaudeToken: hasToken, - claudeOAuthToken: hasToken ? vars['CLAUDE_CODE_OAUTH_TOKEN'] : undefined, - envExists: true, - sourcePath - } - }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to get source env' - }; - } - } - ); - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE, - async (_, config: { claudeOAuthToken?: string }): Promise => { - try { - const sourcePath = getEffectiveSourcePath(); - if (!sourcePath) { - return { - success: false, - error: 'Auto-Claude source path not found. Please configure it in App Settings.' 
- }; - } - - const envPath = path.join(sourcePath, '.env'); - - // Read existing content or start fresh - let existingContent = ''; - const existingVars: Record = {}; - - if (existsSync(envPath)) { - existingContent = readFileSync(envPath, 'utf-8'); - Object.assign(existingVars, parseSourceEnvFile(existingContent)); - } - - // Update the token - if (config.claudeOAuthToken !== undefined) { - existingVars['CLAUDE_CODE_OAUTH_TOKEN'] = config.claudeOAuthToken; - } - - // Rebuild the .env file preserving comments and structure - const lines = existingContent.split('\n'); - const processedKeys = new Set(); - const outputLines: string[] = []; - - for (const line of lines) { - const trimmed = line.trim(); - if (!trimmed || trimmed.startsWith('#')) { - outputLines.push(line); - continue; - } - - const eqIndex = trimmed.indexOf('='); - if (eqIndex > 0) { - const key = trimmed.substring(0, eqIndex).trim(); - if (key in existingVars) { - outputLines.push(`${key}=${existingVars[key]}`); - processedKeys.add(key); - } else { - outputLines.push(line); - } - } else { - outputLines.push(line); - } - } - - // Add any new keys that weren't in the original file - for (const [key, value] of Object.entries(existingVars)) { - if (!processedKeys.has(key)) { - outputLines.push(`${key}=${value}`); - } - } - - writeFileSync(envPath, outputLines.join('\n')); - - return { success: true }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to update source env' - }; - } - } - ); - - ipcMain.handle( - IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN, - async (): Promise> => { - try { - const sourcePath = getEffectiveSourcePath(); - if (!sourcePath) { - return { - success: true, - data: { - hasToken: false, - sourcePath: undefined, - error: 'Auto-Claude source path not found' - } - }; - } - - const envPath = path.join(sourcePath, '.env'); - if (!existsSync(envPath)) { - return { - success: true, - data: { - hasToken: false, - sourcePath, - error: '.env file does not exist' - } - }; - } - - const content = readFileSync(envPath, 'utf-8'); - const vars = parseSourceEnvFile(content); - const hasToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN'] && vars['CLAUDE_CODE_OAUTH_TOKEN'].length > 0; - - return { - success: true, - data: { - hasToken, - sourcePath - } - }; - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : 'Failed to check source token' - }; - } - } - ); - -} diff --git a/apps/frontend/src/main/ipc-handlers/env-handlers.ts b/apps/frontend/src/main/ipc-handlers/env-handlers.ts index 9574215b9e..99ab0790c4 100644 --- a/apps/frontend/src/main/ipc-handlers/env-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/env-handlers.ts @@ -8,6 +8,8 @@ import { existsSync, readFileSync, writeFileSync } from 'fs'; import { spawn } from 'child_process'; import { projectStore } from '../project-store'; import { parseEnvFile } from './utils'; +import { getClaudeCliInvocation, getClaudeCliInvocationAsync } from '../claude-cli-utils'; +import { debugError } from '../../shared/utils/debug-logger'; // GitLab environment variable keys const GITLAB_ENV_KEYS = { @@ -25,6 +27,43 @@ function envLine(vars: Record, key: string, defaultVal: string = return vars[key] ? 
`${key}=${vars[key]}` : `# ${key}=${defaultVal}`; } +type ResolvedClaudeCliInvocation = + | { command: string; env: Record } + | { error: string }; + +function resolveClaudeCliInvocation(): ResolvedClaudeCliInvocation { + try { + const invocation = getClaudeCliInvocation(); + if (!invocation?.command) { + throw new Error('Claude CLI path not resolved'); + } + return { command: invocation.command, env: invocation.env }; + } catch (error) { + debugError('[IPC] Failed to resolve Claude CLI path:', error); + return { + error: error instanceof Error ? error.message : 'Failed to resolve Claude CLI path', + }; + } +} + +/** + * Async version of resolveClaudeCliInvocation - non-blocking for main process + */ +async function resolveClaudeCliInvocationAsync(): Promise { + try { + const invocation = await getClaudeCliInvocationAsync(); + if (!invocation?.command) { + throw new Error('Claude CLI path not resolved'); + } + return { command: invocation.command, env: invocation.env }; + } catch (error) { + debugError('[IPC] Failed to resolve Claude CLI path:', error); + return { + error: error instanceof Error ? error.message : 'Failed to resolve Claude CLI path', + }; + } +} + /** * Register all env-related IPC handlers @@ -552,13 +591,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ return { success: false, error: 'Project not found' }; } + // Use async version to avoid blocking main process during CLI detection + const resolved = await resolveClaudeCliInvocationAsync(); + if ('error' in resolved) { + return { success: false, error: resolved.error }; + } + const claudeCmd = resolved.command; + const claudeEnv = resolved.env; + try { // Check if Claude CLI is available and authenticated const result = await new Promise((resolve) => { - const proc = spawn('claude', ['--version'], { + const proc = spawn(claudeCmd, ['--version'], { cwd: project.path, - env: { ...process.env }, - shell: true + env: claudeEnv, + shell: false }); let _stdout = ''; @@ -576,10 +623,10 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ if (code === 0) { // Claude CLI is available, check if authenticated // Run a simple command that requires auth - const authCheck = spawn('claude', ['api', '--help'], { + const authCheck = spawn(claudeCmd, ['api', '--help'], { cwd: project.path, - env: { ...process.env }, - shell: true + env: claudeEnv, + shell: false }); authCheck.on('close', (authCode: number | null) => { @@ -614,6 +661,9 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ }); }); + if (!result.success) { + return { success: false, error: result.error || 'Failed to check Claude auth' }; + } return { success: true, data: result }; } catch (error) { return { @@ -632,13 +682,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? 
`GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ return { success: false, error: 'Project not found' }; } + // Use async version to avoid blocking main process during CLI detection + const resolved = await resolveClaudeCliInvocationAsync(); + if ('error' in resolved) { + return { success: false, error: resolved.error }; + } + const claudeCmd = resolved.command; + const claudeEnv = resolved.env; + try { // Run claude setup-token which will open browser for OAuth const result = await new Promise((resolve) => { - const proc = spawn('claude', ['setup-token'], { + const proc = spawn(claudeCmd, ['setup-token'], { cwd: project.path, - env: { ...process.env }, - shell: true, + env: claudeEnv, + shell: false, stdio: 'inherit' // This allows the terminal to handle the interactive auth }); @@ -666,6 +724,9 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_ }); }); + if (!result.success) { + return { success: false, error: result.error || 'Failed to invoke Claude setup' }; + } return { success: true, data: result }; } catch (error) { return { diff --git a/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts b/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts index 616106675d..4c3c942f7e 100644 --- a/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts +++ b/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts @@ -10,11 +10,15 @@ const mockSpawn = vi.fn(); const mockExecSync = vi.fn(); const mockExecFileSync = vi.fn(); -vi.mock('child_process', () => ({ - spawn: (...args: unknown[]) => mockSpawn(...args), - execSync: (...args: unknown[]) => mockExecSync(...args), - execFileSync: (...args: unknown[]) => mockExecFileSync(...args) -})); +vi.mock('child_process', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + spawn: (...args: unknown[]) => mockSpawn(...args), + execSync: (...args: unknown[]) => mockExecSync(...args), + execFileSync: (...args: unknown[]) => mockExecFileSync(...args) + }; +}); // Mock shell.openExternal const mockOpenExternal = vi.fn(); @@ -82,6 +86,13 @@ vi.mock('../../../env-utils', () => ({ isCommandAvailable: vi.fn((cmd: string) => mockFindExecutable(cmd) !== null) })); +// Mock cli-tool-manager to avoid child_process import issues +vi.mock('../../../cli-tool-manager', () => ({ + getToolPath: vi.fn(() => '/usr/local/bin/gh'), + detectCLITools: vi.fn(), + getAllToolStatus: vi.fn() +})); + // Create mock process for spawn function createMockProcess(): EventEmitter & { stdout: EventEmitter | null; diff --git a/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts b/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts new file mode 100644 index 0000000000..751578da7f --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts @@ -0,0 +1,260 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import type { Project } from '../../../../shared/types'; +import { IPC_CHANNELS } from '../../../../shared/constants'; +import type { BrowserWindow } from 'electron'; +import type { AgentManager } from '../../../agent/agent-manager'; +import type { createIPCCommunicators as createIPCCommunicatorsType } from '../utils/ipc-communicator'; + +const mockIpcMain = vi.hoisted(() => { + class HoistedMockIpcMain { + handlers = new Map(); + listeners = new 
Map(); + + handle(channel: string, handler: Function): void { + this.handlers.set(channel, handler); + } + + on(channel: string, listener: Function): void { + this.listeners.set(channel, listener); + } + + async invokeHandler(channel: string, ...args: unknown[]): Promise { + const handler = this.handlers.get(channel); + if (!handler) { + throw new Error(`No handler for channel: ${channel}`); + } + return handler({}, ...args); + } + + async emit(channel: string, ...args: unknown[]): Promise { + const listener = this.listeners.get(channel); + if (!listener) { + throw new Error(`No listener for channel: ${channel}`); + } + await listener({}, ...args); + } + + reset(): void { + this.handlers.clear(); + this.listeners.clear(); + } + } + + return new HoistedMockIpcMain(); +}); + +const mockRunPythonSubprocess = vi.fn(); +const mockValidateGitHubModule = vi.fn(); +const mockGetRunnerEnv = vi.fn(); +type CreateIPCCommunicators = typeof createIPCCommunicatorsType; + +const mockCreateIPCCommunicators = vi.fn( + (..._args: Parameters) => ({ + sendProgress: vi.fn(), + sendComplete: vi.fn(), + sendError: vi.fn(), + }) +) as unknown as CreateIPCCommunicators; + +const projectRef: { current: Project | null } = { current: null }; +const tempDirs: string[] = []; + +vi.mock('electron', () => ({ + ipcMain: mockIpcMain, + BrowserWindow: class {}, + app: { + getPath: vi.fn(() => '/tmp'), + on: vi.fn(), + }, +})); + +vi.mock('../../../agent/agent-manager', () => ({ + AgentManager: class { + startSpecCreation = vi.fn(); + }, +})); + +vi.mock('../utils/ipc-communicator', () => ({ + createIPCCommunicators: (...args: Parameters) => + mockCreateIPCCommunicators(...args), +})); + +vi.mock('../utils/project-middleware', () => ({ + withProjectOrNull: async (_projectId: string, handler: (project: Project) => Promise) => { + if (!projectRef.current) { + return null; + } + return handler(projectRef.current); + }, +})); + +vi.mock('../utils/subprocess-runner', () => ({ + runPythonSubprocess: (...args: unknown[]) => mockRunPythonSubprocess(...args), + validateGitHubModule: (...args: unknown[]) => mockValidateGitHubModule(...args), + getPythonPath: () => '/tmp/python', + getRunnerPath: () => '/tmp/runner.py', + buildRunnerArgs: (_runnerPath: string, _projectPath: string, command: string, args: string[] = []) => [ + 'runner.py', + command, + ...args, + ], +})); + +vi.mock('../utils/runner-env', () => ({ + getRunnerEnv: (...args: unknown[]) => mockGetRunnerEnv(...args), +})); + +vi.mock('../utils', () => ({ + getGitHubConfig: vi.fn(() => null), + githubFetch: vi.fn(), +})); + +vi.mock('../../../settings-utils', () => ({ + readSettingsFile: vi.fn(() => ({})), +})); + +function createMockWindow(): BrowserWindow { + return { webContents: { send: vi.fn() } } as unknown as BrowserWindow; +} + +function createProject(): Project { + const projectPath = fs.mkdtempSync(path.join(os.tmpdir(), 'github-env-test-')); + tempDirs.push(projectPath); + return { + id: 'project-1', + name: 'Test Project', + path: projectPath, + autoBuildPath: '.auto-claude', + settings: { + model: 'default', + memoryBackend: 'file', + linearSync: false, + notifications: { + onTaskComplete: false, + onTaskFailed: false, + onReviewNeeded: false, + sound: false, + }, + graphitiMcpEnabled: false, + useClaudeMd: true, + }, + createdAt: new Date(), + updatedAt: new Date(), + }; +} + +describe('GitHub runner env usage', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockIpcMain.reset(); + projectRef.current = createProject(); + 
mockValidateGitHubModule.mockResolvedValue({ valid: true, backendPath: '/tmp/backend' }); + mockGetRunnerEnv.mockResolvedValue({ ANTHROPIC_AUTH_TOKEN: 'token' }); + }); + + afterEach(() => { + for (const dir of tempDirs) { + try { + fs.rmSync(dir, { recursive: true, force: true }); + } catch { + // Ignore cleanup errors for already-removed temp dirs. + } + } + tempDirs.length = 0; + }); + + it('passes runner env to PR review subprocess', async () => { + const { registerPRHandlers } = await import('../pr-handlers'); + + mockRunPythonSubprocess.mockReturnValue({ + process: { pid: 123 }, + promise: Promise.resolve({ + success: true, + exitCode: 0, + stdout: '', + stderr: '', + data: { + prNumber: 123, + repo: 'test/repo', + success: true, + findings: [], + summary: '', + overallStatus: 'comment', + reviewedAt: new Date().toISOString(), + }, + }), + }); + + registerPRHandlers(() => createMockWindow()); + await mockIpcMain.emit(IPC_CHANNELS.GITHUB_PR_REVIEW, projectRef.current?.id, 123); + + expect(mockGetRunnerEnv).toHaveBeenCalledWith({ USE_CLAUDE_MD: 'true' }); + expect(mockRunPythonSubprocess).toHaveBeenCalledWith( + expect.objectContaining({ + env: { ANTHROPIC_AUTH_TOKEN: 'token' }, + }) + ); + }); + + it('passes runner env to triage subprocess', async () => { + const { registerTriageHandlers } = await import('../triage-handlers'); + + mockRunPythonSubprocess.mockReturnValue({ + process: { pid: 124 }, + promise: Promise.resolve({ + success: true, + exitCode: 0, + stdout: '', + stderr: '', + data: [], + }), + }); + + registerTriageHandlers(() => createMockWindow()); + await mockIpcMain.emit(IPC_CHANNELS.GITHUB_TRIAGE_RUN, projectRef.current?.id); + + expect(mockGetRunnerEnv).toHaveBeenCalledWith(); + expect(mockRunPythonSubprocess).toHaveBeenCalledWith( + expect.objectContaining({ + env: { ANTHROPIC_AUTH_TOKEN: 'token' }, + }) + ); + }); + + it('passes runner env to autofix analyze preview subprocess', async () => { + const { registerAutoFixHandlers } = await import('../autofix-handlers'); + const { AgentManager: MockedAgentManager } = await import('../../../agent/agent-manager'); + + mockRunPythonSubprocess.mockReturnValue({ + process: { pid: 125 }, + promise: Promise.resolve({ + success: true, + exitCode: 0, + stdout: '', + stderr: '', + data: { + totalIssues: 0, + primaryIssue: null, + proposedBatches: [], + singleIssues: [], + }, + }), + }); + + const agentManager: AgentManager = new MockedAgentManager(); + const getMainWindow: () => BrowserWindow | null = () => createMockWindow(); + + registerAutoFixHandlers(agentManager, getMainWindow); + await mockIpcMain.emit(IPC_CHANNELS.GITHUB_AUTOFIX_ANALYZE_PREVIEW, projectRef.current?.id); + + expect(mockGetRunnerEnv).toHaveBeenCalledWith(); + expect(mockRunPythonSubprocess).toHaveBeenCalledWith( + expect.objectContaining({ + env: { ANTHROPIC_AUTH_TOKEN: 'token' }, + }) + ); + }); +}); diff --git a/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts index 578ebace52..187eaa5d6b 100644 --- a/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts @@ -28,6 +28,7 @@ import { parseJSONFromOutput, } from './utils/subprocess-runner'; import { AgentManager } from '../../agent/agent-manager'; +import { getRunnerEnv } from './utils/runner-env'; // Debug logging const { debug: debugLog } = createContextLogger('GitHub AutoFix'); @@ -277,11 +278,13 @@ async function checkNewIssues(project: Project): Promise 
const backendPath = validation.backendPath!; const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'check-new'); + const subprocessEnv = await getRunnerEnv(); const { promise } = runPythonSubprocess>({ pythonPath: getPythonPath(backendPath), args, cwd: backendPath, + env: subprocessEnv, onComplete: (stdout) => { return parseJSONFromOutput>(stdout); }, @@ -361,7 +364,15 @@ async function startAutoFix( // Create spec const taskDescription = buildInvestigationTask(issue.number, issue.title, issueContext); - const specData = await createSpecForIssue(project, issue.number, issue.title, taskDescription, issue.html_url, labels); + const specData = await createSpecForIssue( + project, + issue.number, + issue.title, + taskDescription, + issue.html_url, + labels, + project.settings?.mainBranch // Pass project's configured main branch + ); // Save auto-fix state const issuesDir = path.join(getGitHubDir(project), 'issues'); @@ -607,6 +618,7 @@ export function registerAutoFixHandlers( const backendPath = validation.backendPath!; const additionalArgs = issueNumbers && issueNumbers.length > 0 ? issueNumbers.map(n => n.toString()) : []; const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'batch-issues', additionalArgs); + const subprocessEnv = await getRunnerEnv(); debugLog('Spawning batch process', { args }); @@ -614,6 +626,7 @@ export function registerAutoFixHandlers( pythonPath: getPythonPath(backendPath), args, cwd: backendPath, + env: subprocessEnv, onProgress: (percent, message) => { sendProgress({ phase: 'batching', @@ -728,12 +741,14 @@ export function registerAutoFixHandlers( } const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'analyze-preview', additionalArgs); + const subprocessEnv = await getRunnerEnv(); debugLog('Spawning analyze-preview process', { args }); const { promise } = runPythonSubprocess({ pythonPath: getPythonPath(backendPath), args, cwd: backendPath, + env: subprocessEnv, onProgress: (percent, message) => { sendProgress({ phase: 'analyzing', progress: percent, message }); }, diff --git a/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts index 8a38619e79..9e2e5c0506 100644 --- a/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts @@ -66,7 +66,8 @@ ${issue.body || 'No description provided.'} issue.title, description, issue.html_url, - labelNames + labelNames, + project.settings?.mainBranch // Pass project's configured main branch ); // Start spec creation with the existing spec directory diff --git a/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts index 4f5a36d435..7ddae6e599 100644 --- a/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts @@ -148,7 +148,8 @@ export function registerInvestigateIssue( issue.title, taskDescription, issue.html_url, - labels + labels, + project.settings?.mainBranch // Pass project's configured main branch ); // NOTE: We intentionally do NOT call agentManager.startSpecCreation() here diff --git a/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts index 7f6b01f44a..a8fea6d47b 100644 --- a/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts +++ 
b/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts @@ -16,10 +16,12 @@ import { IPC_CHANNELS, MODEL_ID_MAP, DEFAULT_FEATURE_MODELS, DEFAULT_FEATURE_THI import { getGitHubConfig, githubFetch } from './utils'; import { readSettingsFile } from '../../settings-utils'; import { getAugmentedEnv } from '../../env-utils'; +import { getMemoryService, getDefaultDbPath } from '../../memory-service'; import type { Project, AppSettings } from '../../../shared/types'; import { createContextLogger } from './utils/logger'; import { withProjectOrNull } from './utils/project-middleware'; import { createIPCCommunicators } from './utils/ipc-communicator'; +import { getRunnerEnv } from './utils/runner-env'; import { runPythonSubprocess, getPythonPath, @@ -70,6 +72,13 @@ function getReviewKey(projectId: string, prNumber: number): string { return `${projectId}:${prNumber}`; } +/** + * Returns env vars for Claude.md usage; enabled unless explicitly opted out. + */ +function getClaudeMdEnv(project: Project): Record | undefined { + return project.settings?.useClaudeMd !== false ? { USE_CLAUDE_MD: 'true' } : undefined; +} + /** * PR review finding from AI analysis */ @@ -101,6 +110,7 @@ export interface PRReviewResult { error?: string; // Follow-up review fields reviewedCommitSha?: string; + reviewedFileBlobs?: Record; // filename → blob SHA for rebase-resistant follow-ups isFollowupReview?: boolean; previousReviewId?: number; resolvedFindings?: string[]; @@ -124,6 +134,174 @@ export interface NewCommitsCheck { hasCommitsAfterPosting?: boolean; } +/** + * Lightweight merge readiness check result + * Used for real-time validation of AI verdict freshness + */ +export interface MergeReadiness { + /** PR is in draft mode */ + isDraft: boolean; + /** GitHub's mergeable status */ + mergeable: 'MERGEABLE' | 'CONFLICTING' | 'UNKNOWN'; + /** Simplified CI status */ + ciStatus: 'passing' | 'failing' | 'pending' | 'none'; + /** List of blockers that contradict a "ready to merge" verdict */ + blockers: string[]; +} + +/** + * PR review memory stored in the memory layer + * Represents key insights and learnings from a PR review + */ +export interface PRReviewMemory { + prNumber: number; + repo: string; + verdict: string; + timestamp: string; + summary: { + verdict: string; + verdict_reasoning?: string; + finding_counts?: Record; + total_findings?: number; + blockers?: string[]; + risk_assessment?: Record; + }; + keyFindings: Array<{ + severity: string; + category: string; + title: string; + description: string; + file: string; + line: number; + }>; + patterns: string[]; + gotchas: string[]; + isFollowup: boolean; +} + +/** + * Save PR review insights to the Electron memory layer (LadybugDB) + * + * Called after a PR review completes to persist learnings for cross-session context. + * Extracts key findings, patterns, and gotchas from the review result.
+ * + * @param result The completed PR review result + * @param repo Repository name (owner/repo) + * @param isFollowup Whether this is a follow-up review + */ +async function savePRReviewToMemory( + result: PRReviewResult, + repo: string, + isFollowup: boolean = false +): Promise { + const settings = readSettingsFile(); + if (!settings?.memoryEnabled) { + debugLog('Memory not enabled, skipping PR review memory save'); + return; + } + + try { + const memoryService = getMemoryService({ + dbPath: getDefaultDbPath(), + database: 'auto_claude_memory', + }); + + // Build the memory content with comprehensive insights + // We want to capture ALL meaningful findings so the AI can learn from patterns + + // Prioritize findings: critical > high > medium > low + // Include all critical/high, top 5 medium, top 3 low + const criticalFindings = result.findings.filter(f => f.severity === 'critical'); + const highFindings = result.findings.filter(f => f.severity === 'high'); + const mediumFindings = result.findings.filter(f => f.severity === 'medium').slice(0, 5); + const lowFindings = result.findings.filter(f => f.severity === 'low').slice(0, 3); + + const keyFindingsToSave = [ + ...criticalFindings, + ...highFindings, + ...mediumFindings, + ...lowFindings, + ].map(f => ({ + severity: f.severity, + category: f.category, + title: f.title, + description: f.description.substring(0, 500), // Truncate for storage + file: f.file, + line: f.line, + })); + + // Extract gotchas: security issues, critical bugs, and common mistakes + const gotchaCategories = ['security', 'error_handling', 'data_validation', 'race_condition']; + const gotchasToSave = result.findings + .filter(f => + f.severity === 'critical' || + f.severity === 'high' || + gotchaCategories.includes(f.category?.toLowerCase() || '') + ) + .map(f => `[${f.category}] ${f.title}: ${f.description.substring(0, 300)}`); + + // Extract patterns: group findings by category to identify recurring issues + const categoryGroups = result.findings.reduce((acc, f) => { + const cat = f.category || 'general'; + acc[cat] = (acc[cat] || 0) + 1; + return acc; + }, {} as Record); + + // Patterns are categories that appear multiple times (indicates a systematic issue) + const patternsToSave = Object.entries(categoryGroups) + .filter(([_, count]) => count >= 2) + .map(([category, count]) => `${category}: ${count} occurrences`); + + const memoryContent: PRReviewMemory = { + prNumber: result.prNumber, + repo, + verdict: result.overallStatus || 'unknown', + timestamp: new Date().toISOString(), + summary: { + verdict: result.overallStatus || 'unknown', + finding_counts: { + critical: criticalFindings.length, + high: highFindings.length, + medium: result.findings.filter(f => f.severity === 'medium').length, + low: result.findings.filter(f => f.severity === 'low').length, + }, + total_findings: result.findings.length, + }, + keyFindings: keyFindingsToSave, + patterns: patternsToSave, + gotchas: gotchasToSave, + isFollowup, + }; + + // Add follow-up specific info if applicable + if (isFollowup && result.resolvedFindings && result.unresolvedFindings) { + memoryContent.summary.verdict_reasoning = + `Resolved: ${result.resolvedFindings.length}, Unresolved: ${result.unresolvedFindings.length}`; + } + + // Save to memory as a pr_review episode + const episodeName = `PR #${result.prNumber} ${isFollowup ? 
'Follow-up ' : ''}Review - ${repo}`; + const saveResult = await memoryService.addEpisode( + episodeName, + memoryContent, + 'pr_review', + `pr_review_${repo.replace('/', '_')}` + ); + + if (saveResult.success) { + debugLog('PR review saved to memory', { prNumber: result.prNumber, episodeId: saveResult.id }); + } else { + debugLog('Failed to save PR review to memory', { error: saveResult.error }); + } + + } catch (error) { + // Don't fail the review if memory save fails + debugLog('Error saving PR review to memory', { + error: error instanceof Error ? error.message : error + }); + } +} + /** * PR data from GitHub API */ @@ -542,6 +720,7 @@ function getReviewResult(project: Project, prNumber: number): PRReviewResult | n error: data.error, // Follow-up review fields (snake_case -> camelCase) reviewedCommitSha: data.reviewed_commit_sha, + reviewedFileBlobs: data.reviewed_file_blobs, isFollowupReview: data.is_followup_review ?? false, previousReviewId: data.previous_review_id, resolvedFindings: data.resolved_findings ?? [], @@ -628,10 +807,9 @@ async function runPRReview( const logCollector = new PRLogCollector(project, prNumber, repo, false); // Build environment with project settings - const subprocessEnv: Record = {}; - if (project.settings?.useClaudeMd !== false) { - subprocessEnv['USE_CLAUDE_MD'] = 'true'; - } + const subprocessEnv = await getRunnerEnv( + getClaudeMdEnv(project) + ); const { process: childProcess, promise } = runPythonSubprocess({ pythonPath: getPythonPath(backendPath), @@ -681,6 +859,12 @@ async function runPRReview( // Finalize logs with success logCollector.finalize(true); + + // Save PR review insights to memory (async, non-blocking) + savePRReviewToMemory(result.data!, repo, false).catch(err => { + debugLog('Failed to save PR review to memory', { error: err.message }); + }); + return result.data!; } finally { // Clean up the registry when done (success or error) @@ -697,11 +881,11 @@ export function registerPRHandlers( ): void { debugLog('Registering PR handlers'); - // List open PRs + // List open PRs with pagination support ipcMain.handle( IPC_CHANNELS.GITHUB_PR_LIST, - async (_, projectId: string): Promise => { - debugLog('listPRs handler called', { projectId }); + async (_, projectId: string, page: number = 1): Promise => { + debugLog('listPRs handler called', { projectId, page }); const result = await withProjectOrNull(projectId, async (project) => { const config = getGitHubConfig(project); if (!config) { @@ -710,9 +894,10 @@ export function registerPRHandlers( } try { + // Use pagination: per_page=100 (GitHub max), page=1,2,3... 
const prs = await githubFetch( config.token, - `/repos/${config.repo}/pulls?state=open&per_page=50` + `/repos/${config.repo}/pulls?state=open&per_page=100&page=${page}` ) as Array<{ number: number; title: string; @@ -730,7 +915,7 @@ export function registerPRHandlers( html_url: string; }>; - debugLog('Fetched PRs', { count: prs.length }); + debugLog('Fetched PRs', { count: prs.length, page }); return prs.map(pr => ({ number: pr.number, title: pr.title, @@ -864,6 +1049,23 @@ export function registerPRHandlers( } ); + // Batch get saved reviews - more efficient than individual calls + ipcMain.handle( + IPC_CHANNELS.GITHUB_PR_GET_REVIEWS_BATCH, + async (_, projectId: string, prNumbers: number[]): Promise> => { + debugLog('getReviewsBatch handler called', { projectId, count: prNumbers.length }); + const result = await withProjectOrNull(projectId, async (project) => { + const reviews: Record = {}; + for (const prNumber of prNumbers) { + reviews[prNumber] = getReviewResult(project, prNumber); + } + debugLog('Batch loaded reviews', { count: Object.values(reviews).filter(r => r !== null).length }); + return reviews; + }); + return result ?? {}; + } + ); + // Get PR review logs ipcMain.handle( IPC_CHANNELS.GITHUB_PR_GET_LOGS, @@ -967,8 +1169,8 @@ export function registerPRHandlers( // Post review to GitHub ipcMain.handle( IPC_CHANNELS.GITHUB_PR_POST_REVIEW, - async (_, projectId: string, prNumber: number, selectedFindingIds?: string[]): Promise => { - debugLog('postPRReview handler called', { projectId, prNumber, selectedCount: selectedFindingIds?.length }); + async (_, projectId: string, prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }): Promise => { + debugLog('postPRReview handler called', { projectId, prNumber, selectedCount: selectedFindingIds?.length, forceApprove: options?.forceApprove }); const postResult = await withProjectOrNull(projectId, async (project) => { const result = getReviewResult(project, prNumber); if (!result) { @@ -991,36 +1193,69 @@ export function registerPRHandlers( debugLog('Posting findings', { total: result.findings.length, selected: findings.length }); - // Build review body - let body = `## ๐Ÿค– Auto Claude PR Review\n\n${result.summary}\n\n`; - - if (findings.length > 0) { - // Show selected count vs total if filtered - const countText = selectedSet - ? 
`${findings.length} selected of ${result.findings.length} total` - : `${findings.length} total`; - body += `### Findings (${countText})\n\n`; - - for (const f of findings) { - const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪'; - body += `#### ${emoji} [${f.severity.toUpperCase()}] ${f.title}\n`; - body += `📍 \`${f.file}:${f.line}\`\n\n`; - body += `${f.description}\n\n`; - // Only show suggested fix if it has actual content - const suggestedFix = f.suggestedFix?.trim(); - if (suggestedFix) { - body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`; + // Build review body - different format for auto-approve with suggestions + let body: string; + + if (options?.forceApprove) { + // Auto-approve format: clean approval message with optional suggestions + body = `## ✅ Auto Claude Review - APPROVED\n\n`; + body += `**Status:** Ready to Merge\n\n`; + body += `**Summary:** ${result.summary}\n\n`; + + if (findings.length > 0) { + body += `---\n\n`; + body += `### 💡 Suggestions (${findings.length})\n\n`; + body += `*These are non-blocking suggestions for consideration:*\n\n`; + + for (const f of findings) { + const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪'; + body += `#### ${emoji} [${f.id}] [${f.severity.toUpperCase()}] ${f.title}\n`; + body += `📍 \`${f.file}:${f.line}\`\n\n`; + body += `${f.description}\n\n`; + const suggestedFix = f.suggestedFix?.trim(); + if (suggestedFix) { + body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`; + } } } + + body += `---\n*This automated review found no blocking issues. The PR can be safely merged.*\n\n`; + body += `*Generated by Auto Claude*`; } else { - body += `*No findings selected for this review.*\n\n`; - } + // Standard review format + body = `## 🤖 Auto Claude PR Review\n\n${result.summary}\n\n`; + + if (findings.length > 0) { + // Show selected count vs total if filtered + const countText = selectedSet + ? `${findings.length} selected of ${result.findings.length} total` + : `${findings.length} total`; + body += `### Findings (${countText})\n\n`; + + for (const f of findings) { + const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪'; + body += `#### ${emoji} [${f.id}] [${f.severity.toUpperCase()}] ${f.title}\n`; + body += `📍 \`${f.file}:${f.line}\`\n\n`; + body += `${f.description}\n\n`; + // Only show suggested fix if it has actual content + const suggestedFix = f.suggestedFix?.trim(); + if (suggestedFix) { + body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`; + } + } + } else { + body += `*No findings selected for this review.*\n\n`; + } - body += `---\n*This review was generated by Auto Claude.*`; + body += `---\n*This review was generated by Auto Claude.*`; + } - // Determine review status based on selected findings + // Determine review status based on selected findings (or force approve) let overallStatus = result.overallStatus; - if (selectedSet) { + if (options?.forceApprove) { + // Force approve regardless of findings + overallStatus = 'approve'; + } else if (selectedSet) { const hasBlocker = findings.some(f => f.severity === 'critical' || f.severity === 'high'); overallStatus = hasBlocker ? 'request_changes' : (findings.length > 0 ?
'comment' : 'approve'); } @@ -1425,6 +1660,137 @@ export function registerPRHandlers( } ); + // Check merge readiness (lightweight freshness check for verdict validation) + ipcMain.handle( + IPC_CHANNELS.GITHUB_PR_CHECK_MERGE_READINESS, + async (_, projectId: string, prNumber: number): Promise => { + debugLog('checkMergeReadiness handler called', { projectId, prNumber }); + + const defaultResult: MergeReadiness = { + isDraft: false, + mergeable: 'UNKNOWN', + ciStatus: 'none', + blockers: [], + }; + + const result = await withProjectOrNull(projectId, async (project) => { + const config = getGitHubConfig(project); + if (!config) { + debugLog('No GitHub config found for checkMergeReadiness'); + return defaultResult; + } + + try { + // Fetch PR data including mergeable status + const pr = await githubFetch( + config.token, + `/repos/${config.repo}/pulls/${prNumber}` + ) as { + draft: boolean; + mergeable: boolean | null; + mergeable_state: string; + head: { sha: string }; + }; + + // Determine mergeable status + let mergeable: MergeReadiness['mergeable'] = 'UNKNOWN'; + if (pr.mergeable === true) { + mergeable = 'MERGEABLE'; + } else if (pr.mergeable === false || pr.mergeable_state === 'dirty') { + mergeable = 'CONFLICTING'; + } + + // Fetch combined commit status for CI + let ciStatus: MergeReadiness['ciStatus'] = 'none'; + try { + const status = await githubFetch( + config.token, + `/repos/${config.repo}/commits/${pr.head.sha}/status` + ) as { + state: 'success' | 'pending' | 'failure' | 'error'; + total_count: number; + }; + + if (status.total_count === 0) { + // No status checks, check for check runs (GitHub Actions) + const checkRuns = await githubFetch( + config.token, + `/repos/${config.repo}/commits/${pr.head.sha}/check-runs` + ) as { + total_count: number; + check_runs: Array<{ conclusion: string | null; status: string }>; + }; + + if (checkRuns.total_count > 0) { + const hasFailing = checkRuns.check_runs.some( + cr => cr.conclusion === 'failure' || cr.conclusion === 'cancelled' + ); + const hasPending = checkRuns.check_runs.some( + cr => cr.status !== 'completed' + ); + + if (hasFailing) { + ciStatus = 'failing'; + } else if (hasPending) { + ciStatus = 'pending'; + } else { + ciStatus = 'passing'; + } + } + } else { + // Use combined status + if (status.state === 'success') { + ciStatus = 'passing'; + } else if (status.state === 'pending') { + ciStatus = 'pending'; + } else { + ciStatus = 'failing'; + } + } + } catch (err) { + debugLog('Failed to fetch CI status', { prNumber, error: err instanceof Error ? err.message : err }); + // Continue without CI status + } + + // Build blockers list + const blockers: string[] = []; + if (pr.draft) { + blockers.push('PR is in draft mode'); + } + if (mergeable === 'CONFLICTING') { + blockers.push('Merge conflicts detected'); + } + if (ciStatus === 'failing') { + blockers.push('CI checks are failing'); + } + + debugLog('checkMergeReadiness result', { + prNumber, + isDraft: pr.draft, + mergeable, + ciStatus, + blockers, + }); + + return { + isDraft: pr.draft, + mergeable, + ciStatus, + blockers, + }; + } catch (error) { + debugLog('Failed to check merge readiness', { + prNumber, + error: error instanceof Error ? error.message : error, + }); + return defaultResult; + } + }); + + return result ?? 
defaultResult; + } + ); + // Run follow-up review ipcMain.on( IPC_CHANNELS.GITHUB_PR_FOLLOWUP_REVIEW, @@ -1489,10 +1855,9 @@ export function registerPRHandlers( const logCollector = new PRLogCollector(project, prNumber, repo, true); // Build environment with project settings - const followupEnv: Record = {}; - if (project.settings?.useClaudeMd !== false) { - followupEnv['USE_CLAUDE_MD'] = 'true'; - } + const followupEnv = await getRunnerEnv( + getClaudeMdEnv(project) + ); const { process: childProcess, promise } = runPythonSubprocess({ pythonPath: getPythonPath(backendPath), @@ -1541,6 +1906,11 @@ export function registerPRHandlers( // Finalize logs with success logCollector.finalize(true); + // Save follow-up PR review insights to memory (async, non-blocking) + savePRReviewToMemory(result.data!, repo, true).catch(err => { + debugLog('Failed to save follow-up PR review to memory', { error: err.message }); + }); + debugLog('Follow-up review completed', { prNumber, findingsCount: result.data?.findings.length }); sendProgress({ phase: 'complete', @@ -1571,5 +1941,226 @@ export function registerPRHandlers( } ); + // Get workflows awaiting approval for a PR (fork PRs) + ipcMain.handle( + IPC_CHANNELS.GITHUB_WORKFLOWS_AWAITING_APPROVAL, + async (_, projectId: string, prNumber: number): Promise<{ + awaiting_approval: number; + workflow_runs: Array<{ id: number; name: string; html_url: string; workflow_name: string }>; + can_approve: boolean; + error?: string; + }> => { + debugLog('getWorkflowsAwaitingApproval handler called', { projectId, prNumber }); + const result = await withProjectOrNull(projectId, async (project) => { + const config = getGitHubConfig(project); + if (!config) { + return { awaiting_approval: 0, workflow_runs: [], can_approve: false, error: 'No GitHub config' }; + } + + try { + // First get the PR's head SHA + const prData = await githubFetch( + config.token, + `/repos/${config.repo}/pulls/${prNumber}` + ) as { head?: { sha?: string } }; + + const headSha = prData?.head?.sha; + if (!headSha) { + return { awaiting_approval: 0, workflow_runs: [], can_approve: false }; + } + + // Query workflow runs with action_required status + const runsData = await githubFetch( + config.token, + `/repos/${config.repo}/actions/runs?status=action_required&per_page=100` + ) as { workflow_runs?: Array<{ id: number; name: string; html_url: string; head_sha: string; workflow?: { name?: string } }> }; + + const allRuns = runsData?.workflow_runs || []; + + // Filter to only runs for this PR's head SHA + const prRuns = allRuns + .filter(run => run.head_sha === headSha) + .map(run => ({ + id: run.id, + name: run.name, + html_url: run.html_url, + workflow_name: run.workflow?.name || 'Unknown', + })); + + debugLog('Found workflows awaiting approval', { prNumber, count: prRuns.length }); + + return { + awaiting_approval: prRuns.length, + workflow_runs: prRuns, + can_approve: true, // Assume token has permission; will fail if not + }; + } catch (error) { + debugLog('Failed to get workflows awaiting approval', { prNumber, error: error instanceof Error ? error.message : error }); + return { + awaiting_approval: 0, + workflow_runs: [], + can_approve: false, + error: error instanceof Error ? error.message : 'Unknown error', + }; + } + }); + + return result ?? 
{ awaiting_approval: 0, workflow_runs: [], can_approve: false }; + } + ); + + // Approve a workflow run + ipcMain.handle( + IPC_CHANNELS.GITHUB_WORKFLOW_APPROVE, + async (_, projectId: string, runId: number): Promise => { + debugLog('approveWorkflow handler called', { projectId, runId }); + const result = await withProjectOrNull(projectId, async (project) => { + const config = getGitHubConfig(project); + if (!config) { + debugLog('No GitHub config found'); + return false; + } + + try { + // Approve the workflow run + await githubFetch( + config.token, + `/repos/${config.repo}/actions/runs/${runId}/approve`, + { method: 'POST' } + ); + + debugLog('Workflow approved successfully', { runId }); + return true; + } catch (error) { + debugLog('Failed to approve workflow', { runId, error: error instanceof Error ? error.message : error }); + return false; + } + }); + + return result ?? false; + } + ); + + // Get PR review memories from the memory layer + ipcMain.handle( + IPC_CHANNELS.GITHUB_PR_MEMORY_GET, + async (_, projectId: string, limit: number = 10): Promise => { + debugLog('getPRReviewMemories handler called', { projectId, limit }); + const result = await withProjectOrNull(projectId, async (project) => { + const memoryDir = path.join(getGitHubDir(project), 'memory', project.name || 'unknown'); + const memories: PRReviewMemory[] = []; + + // Try to load from file-based storage + try { + const indexPath = path.join(memoryDir, 'reviews_index.json'); + if (!fs.existsSync(indexPath)) { + debugLog('No PR review memories found', { projectId }); + return []; + } + + const indexContent = fs.readFileSync(indexPath, 'utf-8'); + const index = JSON.parse(sanitizeNetworkData(indexContent)); + const reviews = index.reviews || []; + + // Load individual review memories + for (const entry of reviews.slice(0, limit)) { + try { + const reviewPath = path.join(memoryDir, `pr_${entry.pr_number}_review.json`); + if (fs.existsSync(reviewPath)) { + const reviewContent = fs.readFileSync(reviewPath, 'utf-8'); + const memory = JSON.parse(sanitizeNetworkData(reviewContent)); + memories.push({ + prNumber: memory.pr_number, + repo: memory.repo, + verdict: memory.summary?.verdict || 'unknown', + timestamp: memory.timestamp, + summary: memory.summary, + keyFindings: memory.key_findings || [], + patterns: memory.patterns || [], + gotchas: memory.gotchas || [], + isFollowup: memory.is_followup || false, + }); + } + } catch (err) { + debugLog('Failed to load PR review memory', { prNumber: entry.pr_number, error: err instanceof Error ? err.message : err }); + } + } + + debugLog('Loaded PR review memories', { count: memories.length }); + return memories; + } catch (error) { + debugLog('Failed to load PR review memories', { error: error instanceof Error ? error.message : error }); + return []; + } + }); + return result ?? 
[]; + } + ); + + // Search PR review memories + ipcMain.handle( + IPC_CHANNELS.GITHUB_PR_MEMORY_SEARCH, + async (_, projectId: string, query: string, limit: number = 10): Promise => { + debugLog('searchPRReviewMemories handler called', { projectId, query, limit }); + const result = await withProjectOrNull(projectId, async (project) => { + const memoryDir = path.join(getGitHubDir(project), 'memory', project.name || 'unknown'); + const memories: PRReviewMemory[] = []; + const queryLower = query.toLowerCase(); + + // Search through file-based storage + try { + const indexPath = path.join(memoryDir, 'reviews_index.json'); + if (!fs.existsSync(indexPath)) { + return []; + } + + const indexContent = fs.readFileSync(indexPath, 'utf-8'); + const index = JSON.parse(sanitizeNetworkData(indexContent)); + const reviews = index.reviews || []; + + // Search individual review memories + for (const entry of reviews) { + try { + const reviewPath = path.join(memoryDir, `pr_${entry.pr_number}_review.json`); + if (fs.existsSync(reviewPath)) { + const reviewContent = fs.readFileSync(reviewPath, 'utf-8'); + + // Check if content matches query + if (reviewContent.toLowerCase().includes(queryLower)) { + const memory = JSON.parse(sanitizeNetworkData(reviewContent)); + memories.push({ + prNumber: memory.pr_number, + repo: memory.repo, + verdict: memory.summary?.verdict || 'unknown', + timestamp: memory.timestamp, + summary: memory.summary, + keyFindings: memory.key_findings || [], + patterns: memory.patterns || [], + gotchas: memory.gotchas || [], + isFollowup: memory.is_followup || false, + }); + } + } + + // Stop if we have enough + if (memories.length >= limit) { + break; + } + } catch (err) { + debugLog('Failed to search PR review memory', { prNumber: entry.pr_number, error: err instanceof Error ? err.message : err }); + } + } + + debugLog('Found matching PR review memories', { count: memories.length, query }); + return memories; + } catch (error) { + debugLog('Failed to search PR review memories', { error: error instanceof Error ? error.message : error }); + return []; + } + }); + return result ?? 
[]; + } + ); + debugLog('PR handlers registered'); } diff --git a/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts b/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts index b233f59bb1..7e71b12640 100644 --- a/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts +++ b/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts @@ -8,6 +8,7 @@ import { AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants'; import type { Project, TaskMetadata } from '../../../shared/types'; import { withSpecNumberLock } from '../../utils/spec-number-lock'; import { debugLog } from './utils/logger'; +import { labelMatchesWholeWord } from '../shared/label-utils'; export interface SpecCreationData { specId: string; @@ -55,7 +56,14 @@ function determineCategoryFromLabels(labels: string[]): 'feature' | 'bug_fix' | } // Check for infrastructure labels - if (lowerLabels.some(l => l.includes('infrastructure') || l.includes('devops') || l.includes('deployment') || l.includes('ci') || l.includes('cd'))) { + // Use whole-word matching for 'ci' and 'cd' to avoid false positives like 'acid' or 'decide' + if (lowerLabels.some(l => + l.includes('infrastructure') || + l.includes('devops') || + l.includes('deployment') || + labelMatchesWholeWord(l, 'ci') || + labelMatchesWholeWord(l, 'cd') + )) { return 'infrastructure'; } @@ -89,7 +97,8 @@ export async function createSpecForIssue( issueTitle: string, taskDescription: string, githubUrl: string, - labels: string[] = [] + labels: string[] = [], + baseBranch?: string ): Promise { const specsBaseDir = getSpecsDir(project.autoBuildPath); const specsDir = path.join(project.path, specsBaseDir); @@ -144,7 +153,10 @@ export async function createSpecForIssue( sourceType: 'github', githubIssueNumber: issueNumber, githubUrl, - category + category, + // Store baseBranch for worktree creation and QA comparison + // This comes from project.settings.mainBranch or task-level override + ...(baseBranch && { baseBranch }) }; writeFileSync( path.join(specDir, 'task_metadata.json'), diff --git a/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts index 7e0f960be5..a84e44a79c 100644 --- a/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts @@ -19,6 +19,7 @@ import type { Project, AppSettings } from '../../../shared/types'; import { createContextLogger } from './utils/logger'; import { withProjectOrNull } from './utils/project-middleware'; import { createIPCCommunicators } from './utils/ipc-communicator'; +import { getRunnerEnv } from './utils/runner-env'; import { runPythonSubprocess, getPythonPath, @@ -254,10 +255,13 @@ async function runTriage( debugLog('Spawning triage process', { args, model, thinkingLevel }); + const subprocessEnv = await getRunnerEnv(); + const { promise } = runPythonSubprocess({ pythonPath: getPythonPath(backendPath), args, cwd: backendPath, + env: subprocessEnv, onProgress: (percent, message) => { debugLog('Progress update', { percent, message }); sendProgress({ diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts b/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts new file mode 100644 index 0000000000..0ffd9fa29d --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts @@ -0,0 +1,122 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; + +const mockGetAPIProfileEnv = vi.fn(); 
+const mockGetOAuthModeClearVars = vi.fn(); +const mockGetPythonEnv = vi.fn(); +const mockGetProfileEnv = vi.fn(); + +vi.mock('../../../../services/profile', () => ({ + getAPIProfileEnv: (...args: unknown[]) => mockGetAPIProfileEnv(...args), +})); + +vi.mock('../../../../agent/env-utils', () => ({ + getOAuthModeClearVars: (...args: unknown[]) => mockGetOAuthModeClearVars(...args), +})); + +vi.mock('../../../../python-env-manager', () => ({ + pythonEnvManager: { + getPythonEnv: () => mockGetPythonEnv(), + }, +})); + +vi.mock('../../../../rate-limit-detector', () => ({ + getProfileEnv: () => mockGetProfileEnv(), +})); + +import { getRunnerEnv } from '../runner-env'; + +describe('getRunnerEnv', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Default mock for Python env - minimal env for testing + mockGetPythonEnv.mockReturnValue({ + PYTHONDONTWRITEBYTECODE: '1', + PYTHONIOENCODING: 'utf-8', + PYTHONNOUSERSITE: '1', + PYTHONPATH: '/bundled/site-packages', + }); + // Default mock for profile env - returns empty by default + mockGetProfileEnv.mockReturnValue({}); + }); + + it('merges Python env with API profile env and OAuth clear vars', async () => { + mockGetAPIProfileEnv.mockResolvedValue({ + ANTHROPIC_AUTH_TOKEN: 'token', + ANTHROPIC_BASE_URL: 'https://api.example.com', + }); + mockGetOAuthModeClearVars.mockReturnValue({ + ANTHROPIC_AUTH_TOKEN: '', + }); + + const result = await getRunnerEnv(); + + expect(mockGetOAuthModeClearVars).toHaveBeenCalledWith({ + ANTHROPIC_AUTH_TOKEN: 'token', + ANTHROPIC_BASE_URL: 'https://api.example.com', + }); + // Python env is included first, then overridden by OAuth clear vars + expect(result).toMatchObject({ + PYTHONPATH: '/bundled/site-packages', + PYTHONDONTWRITEBYTECODE: '1', + ANTHROPIC_AUTH_TOKEN: '', + ANTHROPIC_BASE_URL: 'https://api.example.com', + }); + }); + + it('includes extra env values with highest precedence', async () => { + mockGetAPIProfileEnv.mockResolvedValue({ + ANTHROPIC_AUTH_TOKEN: 'token', + }); + mockGetOAuthModeClearVars.mockReturnValue({}); + + const result = await getRunnerEnv({ USE_CLAUDE_MD: 'true' }); + + expect(result).toMatchObject({ + PYTHONPATH: '/bundled/site-packages', + ANTHROPIC_AUTH_TOKEN: 'token', + USE_CLAUDE_MD: 'true', + }); + }); + + it('includes PYTHONPATH for bundled packages (fixes #139)', async () => { + mockGetAPIProfileEnv.mockResolvedValue({}); + mockGetOAuthModeClearVars.mockReturnValue({}); + mockGetPythonEnv.mockReturnValue({ + PYTHONPATH: '/app/Contents/Resources/python-site-packages', + }); + + const result = await getRunnerEnv(); + + expect(result.PYTHONPATH).toBe('/app/Contents/Resources/python-site-packages'); + }); + + it('includes profileEnv for OAuth token (fixes #563)', async () => { + mockGetAPIProfileEnv.mockResolvedValue({}); + mockGetOAuthModeClearVars.mockReturnValue({}); + mockGetProfileEnv.mockReturnValue({ + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-123', + }); + + const result = await getRunnerEnv(); + + expect(result.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-123'); + }); + + it('applies correct precedence order with profileEnv overriding pythonEnv', async () => { + mockGetPythonEnv.mockReturnValue({ + SHARED_VAR: 'from-python', + }); + mockGetAPIProfileEnv.mockResolvedValue({ + SHARED_VAR: 'from-api-profile', + }); + mockGetOAuthModeClearVars.mockReturnValue({}); + mockGetProfileEnv.mockReturnValue({ + SHARED_VAR: 'from-profile', + }); + + const result = await getRunnerEnv({ SHARED_VAR: 'from-extra' }); + + // extraEnv has highest precedence + 
expect(result.SHARED_VAR).toBe('from-extra');
+  });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts b/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts
new file mode 100644
index 0000000000..ace24490bc
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts
@@ -0,0 +1,38 @@
+import { getOAuthModeClearVars } from '../../../agent/env-utils';
+import { getAPIProfileEnv } from '../../../services/profile';
+import { getProfileEnv } from '../../../rate-limit-detector';
+import { pythonEnvManager } from '../../../python-env-manager';
+
+/**
+ * Get environment variables for Python runner subprocesses.
+ *
+ * Environment variable precedence (lowest to highest):
+ * 1. pythonEnv - Python environment including PYTHONPATH for bundled packages (fixes #139)
+ * 2. apiProfileEnv - Custom Anthropic-compatible API profile (ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN)
+ * 3. oauthModeClearVars - Clears stale ANTHROPIC_* vars when in OAuth mode
+ * 4. profileEnv - Claude OAuth token from profile manager (CLAUDE_CODE_OAUTH_TOKEN)
+ * 5. extraEnv - Caller-specific vars (e.g., USE_CLAUDE_MD)
+ *
+ * The pythonEnv is critical for packaged apps (#139) - without PYTHONPATH, Python
+ * cannot find bundled dependencies like dotenv, claude_agent_sdk, etc.
+ *
+ * The profileEnv is critical for OAuth authentication (#563) - it retrieves the
+ * decrypted OAuth token from the profile manager's encrypted storage (macOS Keychain
+ * via Electron's safeStorage API).
+ */
+export async function getRunnerEnv(
+  extraEnv?: Record<string, string>
+): Promise<Record<string, string>> {
+  const pythonEnv = pythonEnvManager.getPythonEnv();
+  const apiProfileEnv = await getAPIProfileEnv();
+  const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+  const profileEnv = getProfileEnv();
+
+  return {
+    ...pythonEnv, // Python environment including PYTHONPATH (fixes #139)
+    ...apiProfileEnv,
+    ...oauthModeClearVars,
+    ...profileEnv, // OAuth token from profile manager (fixes #563)
+    ...extraEnv,
+  };
+}
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
index 8fe079820b..49c504b061 100644
--- a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
@@ -4,11 +4,15 @@ import { runPythonSubprocess } from './subprocess-runner';
 import * as childProcess from 'child_process';
 import EventEmitter from 'events';
 
-// Mock child_process.spawn
-vi.mock('child_process', () => ({
-  spawn: vi.fn(),
-  exec: vi.fn(),
-}));
+// Mock child_process with importOriginal to preserve all exports
+vi.mock('child_process', async (importOriginal) => {
+  const actual = await importOriginal();
+  return {
+    ...actual,
+    spawn: vi.fn(),
+    exec: vi.fn(),
+  };
+});
 
 // Mock parsePythonCommand
 vi.mock('../../../python-detector', () => ({
@@ -72,7 +76,7 @@ describe('runPythonSubprocess', () => {
     const pythonPath = 'python';
     const pythonBaseArgs = ['-u', '-X', 'utf8'];
     const userArgs = ['script.py', '--verbose'];
-
+    // Setup mock to simulate what parsePythonCommand would return for a standard python path
     vi.mocked(parsePythonCommand).mockReturnValue(['python', pythonBaseArgs]);


@@ -87,11 +91,126 @@ describe('runPythonSubprocess', () => {
     // The critical check: verify the ORDER of arguments in the second parameter of spawn
     // expect call to be: spawn('python', ['-u', '-X', 'utf8', 'script.py', '--verbose'], ...)
const expectedArgs = [...pythonBaseArgs, ...userArgs]; - + expect(mockSpawn).toHaveBeenCalledWith( expect.any(String), expectedArgs, // Exact array match verifies order expect.any(Object) ); }); + + describe('environment handling', () => { + it('should use caller-provided env directly when options.env is set', () => { + // Arrange + const customEnv = { + PATH: '/custom/path', + PYTHONPATH: '/custom/pythonpath', + ANTHROPIC_AUTH_TOKEN: 'custom-token', + }; + vi.mocked(parsePythonCommand).mockReturnValue(['python', []]); + + // Act + runPythonSubprocess({ + pythonPath: 'python', + args: ['script.py'], + cwd: '/tmp', + env: customEnv, + }); + + // Assert - should use the exact env provided + expect(mockSpawn).toHaveBeenCalledWith( + expect.any(String), + expect.any(Array), + expect.objectContaining({ + env: customEnv, + }) + ); + }); + + it('should create fallback env when options.env is not provided', () => { + // Arrange + const originalEnv = process.env; + try { + process.env = { + PATH: '/usr/bin', + HOME: '/home/user', + USER: 'testuser', + SHELL: '/bin/bash', + LANG: 'en_US.UTF-8', + CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token', + ANTHROPIC_API_KEY: 'api-key', + SENSITIVE_VAR: 'should-not-leak', + }; + + vi.mocked(parsePythonCommand).mockReturnValue(['python', []]); + + // Act + runPythonSubprocess({ + pythonPath: 'python', + args: ['script.py'], + cwd: '/tmp', + // No env provided - should use fallback + }); + + // Assert - should only include safe vars + const spawnCall = mockSpawn.mock.calls[0]; + const envArg = spawnCall[2].env; + + // Safe vars should be included + expect(envArg.PATH).toBe('/usr/bin'); + expect(envArg.HOME).toBe('/home/user'); + expect(envArg.USER).toBe('testuser'); + + // CLAUDE_ and ANTHROPIC_ prefixed vars should be included + expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token'); + expect(envArg.ANTHROPIC_API_KEY).toBe('api-key'); + + // Sensitive vars should NOT be included + expect(envArg.SENSITIVE_VAR).toBeUndefined(); + } finally { + // Restore - always runs even if assertions fail + process.env = originalEnv; + } + }); + + it('fallback env should include platform-specific vars on Windows', () => { + // Arrange + const originalEnv = process.env; + try { + process.env = { + PATH: 'C:\\Windows\\System32', + SYSTEMROOT: 'C:\\Windows', + COMSPEC: 'C:\\Windows\\System32\\cmd.exe', + PATHEXT: '.COM;.EXE;.BAT', + WINDIR: 'C:\\Windows', + USERPROFILE: 'C:\\Users\\test', + APPDATA: 'C:\\Users\\test\\AppData\\Roaming', + LOCALAPPDATA: 'C:\\Users\\test\\AppData\\Local', + }; + + vi.mocked(parsePythonCommand).mockReturnValue(['python', []]); + + // Act + runPythonSubprocess({ + pythonPath: 'python', + args: ['script.py'], + cwd: '/tmp', + // No env provided - should use fallback + }); + + // Assert - Windows-specific vars should be included + const spawnCall = mockSpawn.mock.calls[0]; + const envArg = spawnCall[2].env; + + expect(envArg.SYSTEMROOT).toBe('C:\\Windows'); + expect(envArg.COMSPEC).toBe('C:\\Windows\\System32\\cmd.exe'); + expect(envArg.PATHEXT).toBe('.COM;.EXE;.BAT'); + expect(envArg.USERPROFILE).toBe('C:\\Users\\test'); + expect(envArg.APPDATA).toBe('C:\\Users\\test\\AppData\\Roaming'); + } finally { + // Restore - always runs even if assertions fail + process.env = originalEnv; + } + }); + }); }); diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts index db6ae7dc0e..5b1700cf1b 100644 --- 
a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts +++ b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts @@ -15,6 +15,36 @@ import { parsePythonCommand } from '../../../python-detector'; const execAsync = promisify(exec); +/** + * Create a fallback environment for Python subprocesses when no env is provided. + * This is used for backwards compatibility when callers don't use getRunnerEnv(). + * + * Includes: + * - Platform-specific vars needed for shell commands and CLI tools + * - CLAUDE_ and ANTHROPIC_ prefixed vars for authentication + */ +function createFallbackRunnerEnv(): Record { + // Include platform-specific vars needed for shell commands and CLI tools + // Windows: SYSTEMROOT, COMSPEC, PATHEXT, WINDIR for shell; USERPROFILE, APPDATA, LOCALAPPDATA for gh CLI auth + const safeEnvVars = ['PATH', 'HOME', 'USER', 'SHELL', 'LANG', 'LC_ALL', 'TERM', 'TMPDIR', 'TMP', 'TEMP', 'DEBUG', 'SYSTEMROOT', 'COMSPEC', 'PATHEXT', 'WINDIR', 'USERPROFILE', 'APPDATA', 'LOCALAPPDATA', 'HOMEDRIVE', 'HOMEPATH']; + const fallbackEnv: Record = {}; + + for (const key of safeEnvVars) { + if (process.env[key]) { + fallbackEnv[key] = process.env[key]!; + } + } + + // Also include any CLAUDE_ or ANTHROPIC_ prefixed vars needed for auth + for (const [key, value] of Object.entries(process.env)) { + if ((key.startsWith('CLAUDE_') || key.startsWith('ANTHROPIC_')) && value) { + fallbackEnv[key] = value; + } + } + + return fallbackEnv; +} + /** * Options for running a Python subprocess */ @@ -54,41 +84,30 @@ export interface SubprocessResult { export function runPythonSubprocess( options: SubprocessOptions ): { process: ChildProcess; promise: Promise> } { - // Don't set PYTHONPATH - let runner.py manage its own import paths - // Setting PYTHONPATH can interfere with runner.py's sys.path manipulation - // Filter environment variables to only include necessary ones (prevent leaking secrets) + // Use the environment provided by the caller (from getRunnerEnv()). + // getRunnerEnv() provides: + // - pythonEnvManager.getPythonEnv() which includes PYTHONPATH for bundled packages (fixes #139) + // - API profile environment (ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN) + // - OAuth mode clearing vars + // - Claude OAuth token (CLAUDE_CODE_OAUTH_TOKEN) + // + // If no env is provided, fall back to filtered process.env for backwards compatibility. // Note: DEBUG is included for PR review debugging (shows LLM thinking blocks). - // This is safe because: (1) user must explicitly enable via npm run dev:debug, - // (2) it only enables our internal debug logging, not third-party framework debugging, - // (3) no sensitive values are logged - only LLM reasoning and response text. 
- // Include platform-specific vars needed for shell commands and CLI tools - // Windows: SYSTEMROOT, COMSPEC, PATHEXT, WINDIR for shell; USERPROFILE, APPDATA, LOCALAPPDATA for gh CLI auth - const safeEnvVars = ['PATH', 'HOME', 'USER', 'SHELL', 'LANG', 'LC_ALL', 'TERM', 'TMPDIR', 'TMP', 'TEMP', 'DEBUG', 'SYSTEMROOT', 'COMSPEC', 'PATHEXT', 'WINDIR', 'USERPROFILE', 'APPDATA', 'LOCALAPPDATA', 'HOMEDRIVE', 'HOMEPATH']; - const filteredEnv: Record = {}; - for (const key of safeEnvVars) { - if (process.env[key]) { - filteredEnv[key] = process.env[key]!; - } - } - // Also include any CLAUDE_ or ANTHROPIC_ prefixed vars needed for auth - for (const [key, value] of Object.entries(process.env)) { - if ((key.startsWith('CLAUDE_') || key.startsWith('ANTHROPIC_')) && value) { - filteredEnv[key] = value; - } - } + let subprocessEnv: Record; - // Merge in any additional env vars passed by the caller (e.g., USE_CLAUDE_MD) if (options.env) { - for (const [key, value] of Object.entries(options.env)) { - filteredEnv[key] = value; - } + // Caller provided a complete environment (from getRunnerEnv()), use it directly + subprocessEnv = { ...options.env }; + } else { + // Fallback: build a filtered environment for backwards compatibility + subprocessEnv = createFallbackRunnerEnv(); } // Parse Python command to handle paths with spaces (e.g., ~/Library/Application Support/...) const [pythonCommand, pythonBaseArgs] = parsePythonCommand(options.pythonPath); const child = spawn(pythonCommand, [...pythonBaseArgs, ...options.args], { cwd: options.cwd, - env: filteredEnv, + env: subprocessEnv, }); const promise = new Promise>((resolve) => { diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts index eea6215d90..7b343efb27 100644 --- a/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts @@ -63,7 +63,7 @@ export function registerImportIssues(): void { ) as GitLabAPIIssue; // Create a spec/task from the issue - const task = await createSpecForIssue(project, apiIssue, config); + const task = await createSpecForIssue(project, apiIssue, config, project.settings?.mainBranch); if (task) { tasks.push(task); diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts index 20b1a422cd..f383f03204 100644 --- a/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts @@ -158,7 +158,7 @@ export function registerInvestigateIssue( }); // Create spec for the issue - const task = await createSpecForIssue(project, issue, config); + const task = await createSpecForIssue(project, issue, config, project.settings?.mainBranch); if (!task) { sendError(getMainWindow, project.id, 'Failed to create task from issue'); diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts index 62cb9e0e8e..b4c310804d 100644 --- a/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts @@ -33,6 +33,7 @@ import { getPythonPath, buildRunnerArgs, } from '../github/utils/subprocess-runner'; +import { getRunnerEnv } from '../github/utils/runner-env'; /** * Get the GitLab runner path @@ -216,10 +217,14 @@ async function runMRReview( debugLog('Spawning MR review 
process', { args, model, thinkingLevel }); + // Get runner environment with PYTHONPATH for bundled packages (fixes #139) + const subprocessEnv = await getRunnerEnv(); + const { process: childProcess, promise } = runPythonSubprocess({ pythonPath: getPythonPath(backendPath), args, cwd: backendPath, + env: subprocessEnv, onProgress: (percent, message) => { debugLog('Progress update', { percent, message }); sendProgress({ @@ -821,10 +826,14 @@ export function registerMRReviewHandlers( debugLog('Spawning follow-up review process', { args, model, thinkingLevel }); + // Get runner environment with PYTHONPATH for bundled packages (fixes #139) + const followupSubprocessEnv = await getRunnerEnv(); + const { process: childProcess, promise } = runPythonSubprocess({ pythonPath: getPythonPath(backendPath), args, cwd: backendPath, + env: followupSubprocessEnv, onProgress: (percent, message) => { debugLog('Progress update', { percent, message }); sendProgress({ diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts b/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts index a8830ca320..c624a63f70 100644 --- a/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts +++ b/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts @@ -7,6 +7,7 @@ import { mkdir, writeFile, readFile, stat } from 'fs/promises'; import path from 'path'; import type { Project } from '../../../shared/types'; import type { GitLabAPIIssue, GitLabConfig } from './types'; +import { labelMatchesWholeWord } from '../shared/label-utils'; /** * Simplified task info returned when creating a spec from a GitLab issue. @@ -60,6 +61,47 @@ function debugLog(message: string, data?: unknown): void { } } +/** + * Determine task category based on GitLab issue labels + * Maps to TaskCategory type from shared/types/task.ts + */ +function determineCategoryFromLabels(labels: string[]): 'feature' | 'bug_fix' | 'refactoring' | 'documentation' | 'security' | 'performance' | 'ui_ux' | 'infrastructure' | 'testing' { + const lowerLabels = labels.map(l => l.toLowerCase()); + + if (lowerLabels.some(l => l.includes('bug') || l.includes('defect') || l.includes('error') || l.includes('fix'))) { + return 'bug_fix'; + } + if (lowerLabels.some(l => l.includes('security') || l.includes('vulnerability') || l.includes('cve'))) { + return 'security'; + } + if (lowerLabels.some(l => l.includes('performance') || l.includes('optimization') || l.includes('speed'))) { + return 'performance'; + } + if (lowerLabels.some(l => l.includes('ui') || l.includes('ux') || l.includes('design') || l.includes('styling'))) { + return 'ui_ux'; + } + // Use whole-word matching for 'ci' and 'cd' to avoid false positives like 'acid' or 'decide' + if (lowerLabels.some(l => + l.includes('infrastructure') || + l.includes('devops') || + l.includes('deployment') || + labelMatchesWholeWord(l, 'ci') || + labelMatchesWholeWord(l, 'cd') + )) { + return 'infrastructure'; + } + if (lowerLabels.some(l => l.includes('test') || l.includes('testing') || l.includes('qa'))) { + return 'testing'; + } + if (lowerLabels.some(l => l.includes('refactor') || l.includes('cleanup') || l.includes('maintenance') || l.includes('chore') || l.includes('tech-debt') || l.includes('technical debt'))) { + return 'refactoring'; + } + if (lowerLabels.some(l => l.includes('documentation') || l.includes('docs'))) { + return 'documentation'; + } + return 'feature'; +} + function stripControlChars(value: string, allowNewlines: boolean): string { let sanitized = ''; for (let i = 0; i < value.length; i += 1) { @@ 
-258,7 +300,8 @@ async function pathExists(filePath: string): Promise { export async function createSpecForIssue( project: Project, issue: GitLabAPIIssue, - config: GitLabConfig + config: GitLabConfig, + baseBranch?: string ): Promise { try { // Validate and sanitize network data before writing to disk @@ -321,7 +364,7 @@ export async function createSpecForIssue( const taskContent = buildIssueContext(safeIssue, safeProject, config.instanceUrl); await writeFile(path.join(specDir, 'TASK.md'), taskContent, 'utf-8'); - // Create metadata.json + // Create metadata.json (legacy format for GitLab-specific data) const metadata = { source: 'gitlab', gitlab: { @@ -339,6 +382,21 @@ export async function createSpecForIssue( }; await writeFile(metadataPath, JSON.stringify(metadata, null, 2), 'utf-8'); + // Create task_metadata.json (consistent with GitHub format for backend compatibility) + const taskMetadata = { + sourceType: 'gitlab' as const, + gitlabIssueIid: safeIssue.iid, + gitlabUrl: safeIssue.web_url, + category: determineCategoryFromLabels(safeIssue.labels || []), + // Store baseBranch for worktree creation and QA comparison + ...(baseBranch && { baseBranch }) + }; + await writeFile( + path.join(specDir, 'task_metadata.json'), + JSON.stringify(taskMetadata, null, 2), + 'utf-8' + ); + debugLog('Created spec for issue:', { iid: safeIssue.iid, specDir }); // Return task info diff --git a/apps/frontend/src/main/ipc-handlers/index.ts b/apps/frontend/src/main/ipc-handlers/index.ts index 3501abd8bc..b3ee57212b 100644 --- a/apps/frontend/src/main/ipc-handlers/index.ts +++ b/apps/frontend/src/main/ipc-handlers/index.ts @@ -23,7 +23,6 @@ import { registerEnvHandlers } from './env-handlers'; import { registerLinearHandlers } from './linear-handlers'; import { registerGithubHandlers } from './github-handlers'; import { registerGitlabHandlers } from './gitlab-handlers'; -import { registerAutobuildSourceHandlers } from './autobuild-source-handlers'; import { registerIdeationHandlers } from './ideation-handlers'; import { registerChangelogHandlers } from './changelog-handlers'; import { registerInsightsHandlers } from './insights-handlers'; @@ -32,6 +31,8 @@ import { registerAppUpdateHandlers } from './app-update-handlers'; import { registerDebugHandlers } from './debug-handlers'; import { registerClaudeCodeHandlers } from './claude-code-handlers'; import { registerMcpHandlers } from './mcp-handlers'; +import { registerProfileHandlers } from './profile-handlers'; +import { registerTerminalWorktreeIpcHandlers } from './terminal'; import { notificationService } from '../notification-service'; /** @@ -60,6 +61,9 @@ export function setupIpcHandlers( // Terminal and Claude profile handlers registerTerminalHandlers(terminalManager, getMainWindow); + // Terminal worktree handlers (isolated development in worktrees) + registerTerminalWorktreeIpcHandlers(); + // Agent event handlers (event forwarding from agent manager to renderer) registerAgenteventsHandlers(agentManager, getMainWindow); @@ -87,9 +91,6 @@ export function setupIpcHandlers( // GitLab integration handlers registerGitlabHandlers(agentManager, getMainWindow); - // Auto-build source update handlers - registerAutobuildSourceHandlers(getMainWindow); - // Ideation handlers registerIdeationHandlers(agentManager, getMainWindow); @@ -114,6 +115,9 @@ export function setupIpcHandlers( // MCP server health check handlers registerMcpHandlers(); + // API Profile handlers (custom Anthropic-compatible endpoints) + registerProfileHandlers(); + console.warn('[IPC] All 
handler modules registered successfully'); } @@ -122,6 +126,7 @@ export { registerProjectHandlers, registerTaskHandlers, registerTerminalHandlers, + registerTerminalWorktreeIpcHandlers, registerAgenteventsHandlers, registerSettingsHandlers, registerFileHandlers, @@ -131,7 +136,6 @@ export { registerLinearHandlers, registerGithubHandlers, registerGitlabHandlers, - registerAutobuildSourceHandlers, registerIdeationHandlers, registerChangelogHandlers, registerInsightsHandlers, @@ -139,5 +143,6 @@ export { registerAppUpdateHandlers, registerDebugHandlers, registerClaudeCodeHandlers, - registerMcpHandlers + registerMcpHandlers, + registerProfileHandlers }; diff --git a/apps/frontend/src/main/ipc-handlers/insights-handlers.ts b/apps/frontend/src/main/ipc-handlers/insights-handlers.ts index cef96a6d7d..11a18c0b88 100644 --- a/apps/frontend/src/main/ipc-handlers/insights-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/insights-handlers.ts @@ -42,9 +42,27 @@ export function registerInsightsHandlers( return; } - // Note: Python environment initialization should be handled by insightsService - // or added here with proper dependency injection if needed - insightsService.sendMessage(projectId, project.path, message, modelConfig); + // Await the async sendMessage to ensure proper error handling and + // that all async operations (like getProcessEnv) complete before + // the handler returns. This fixes race conditions on Windows where + // environment setup wouldn't complete before process spawn. + try { + await insightsService.sendMessage(projectId, project.path, message, modelConfig); + } catch (error) { + // Errors during sendMessage (executor errors) are already emitted via + // the 'error' event, but we catch here to prevent unhandled rejection + // and ensure all error types are reported to the UI + console.error('[Insights IPC] Error in sendMessage:', error); + const mainWindow = getMainWindow(); + if (mainWindow) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + mainWindow.webContents.send( + IPC_CHANNELS.INSIGHTS_ERROR, + projectId, + `Failed to send message: ${errorMessage}` + ); + } + } } ); diff --git a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts index 0515529973..50e16973e4 100644 --- a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts @@ -28,6 +28,12 @@ const DANGEROUS_FLAGS = new Set([ '--require', '-r' ]); +/** + * Defense-in-depth: Shell metacharacters that could enable command injection + * when shell: true is used on Windows + */ +const SHELL_METACHARACTERS = ['&', '|', '>', '<', '^', '%', ';', '$', '`', '\n', '\r']; + /** * Validate that a command is in the safe allowlist */ @@ -39,11 +45,22 @@ function isCommandSafe(command: string | undefined): boolean { } /** - * Validate that args don't contain dangerous interpreter flags + * Validate that args don't contain dangerous interpreter flags or shell metacharacters */ function areArgsSafe(args: string[] | undefined): boolean { if (!args || args.length === 0) return true; - return !args.some(arg => DANGEROUS_FLAGS.has(arg)); + + // Check for dangerous interpreter flags + if (args.some(arg => DANGEROUS_FLAGS.has(arg))) return false; + + // On Windows with shell: true, check for shell metacharacters that could enable injection + if (process.platform === 'win32') { + if (args.some(arg => SHELL_METACHARACTERS.some(char => arg.includes(char)))) { + return false; + } + } + + return true; } /** @@ -171,7 +188,7 @@ async function checkCommandHealth(server: CustomMcpServer, startTime: number): P return resolve({ serverId: server.id, status: 'unhealthy', - message: 'Args contain dangerous interpreter flags', + message: 'Args contain dangerous flags or shell metacharacters', checkedAt: new Date().toISOString(), }); } @@ -394,14 +411,17 @@ async function testCommandConnection(server: CustomMcpServer, startTime: number) return resolve({ serverId: server.id, success: false, - message: 'Args contain dangerous interpreter flags', + message: 'Args contain dangerous flags or shell metacharacters', }); } const args = server.args || []; + + // On Windows, use shell: true to properly handle .cmd/.bat scripts like npx const proc = spawn(server.command!, args, { stdio: ['pipe', 'pipe', 'pipe'], timeout: 15000, // OS-level timeout for reliable process termination + shell: process.platform === 'win32', // Required for Windows to run npx.cmd }); let stdout = ''; diff --git a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts index 5b8c6d0504..9ea2b79ab4 100644 --- a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts @@ -25,7 +25,7 @@ import { } from '../memory-service'; import { validateOpenAIApiKey } from '../api-validation-service'; import { parsePythonCommand } from '../python-detector'; -import { getConfiguredPythonPath } from '../python-env-manager'; +import { getConfiguredPythonPath, pythonEnvManager } from '../python-env-manager'; import { openTerminalWithCommand } from './claude-code-handlers'; /** @@ -212,7 +212,11 @@ function checkOllamaInstalled(): OllamaInstallStatus { * - Official method per https://winstall.app/apps/Ollama.Ollama * - Winget is pre-installed on Windows 10 (1709+) and Windows 11 * - * macOS/Linux: Uses official install script from https://ollama.com/download + * macOS: Uses Homebrew (most common package 
manager on macOS) + * - Official method: brew install ollama + * - Reference: https://ollama.com/download/mac + * + * Linux: Uses official install script from https://ollama.com/download * * @returns {string} The install command to run in terminal */ @@ -222,8 +226,13 @@ function getOllamaInstallCommand(): string { // This is an official installation method for Ollama on Windows // Reference: https://winstall.app/apps/Ollama.Ollama return 'winget install --id Ollama.Ollama --accept-source-agreements'; + } else if (process.platform === 'darwin') { + // macOS: Use Homebrew (most widely used package manager on macOS) + // Official Ollama installation method for macOS + // Reference: https://ollama.com/download/mac + return 'brew install ollama'; } else { - // macOS/Linux: Use shell script from official Ollama + // Linux: Use shell script from official Ollama // Reference: https://ollama.com/download return 'curl -fsSL https://ollama.com/install.sh | sh'; } @@ -296,6 +305,9 @@ async function executeOllamaDetector( let resolved = false; const proc = spawn(pythonExe, args, { stdio: ['ignore', 'pipe', 'pipe'], + // Use sanitized Python environment to prevent PYTHONHOME contamination + // Fixes "Could not find platform independent libraries" error on Windows + env: pythonEnvManager.getPythonEnv(), }); let stdout = ''; @@ -769,6 +781,9 @@ export function registerMemoryHandlers(): void { const proc = spawn(pythonExe, args, { stdio: ['ignore', 'pipe', 'pipe'], timeout: 600000, // 10 minute timeout for large models + // Use sanitized Python environment to prevent PYTHONHOME contamination + // Fixes "Could not find platform independent libraries" error on Windows + env: pythonEnvManager.getPythonEnv(), }); let stdout = ''; diff --git a/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts b/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts new file mode 100644 index 0000000000..0e115e4647 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts @@ -0,0 +1,341 @@ +/** + * Tests for profile IPC handlers + * + * Tests profiles:set-active handler with support for: + * - Setting valid profile as active + * - Switching to OAuth (null profileId) + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import type { APIProfile, ProfilesFile } from '@shared/types/profile'; + +// Hoist mocked functions to avoid circular dependency in atomicModifyProfiles +const { mockedLoadProfilesFile, mockedSaveProfilesFile } = vi.hoisted(() => ({ + mockedLoadProfilesFile: vi.fn(), + mockedSaveProfilesFile: vi.fn() +})); + +// Mock electron before importing +vi.mock('electron', () => ({ + ipcMain: { + handle: vi.fn(), + on: vi.fn() + } +})); + +// Mock profile service +vi.mock('../services/profile', () => ({ + loadProfilesFile: mockedLoadProfilesFile, + saveProfilesFile: mockedSaveProfilesFile, + validateFilePermissions: vi.fn(), + getProfilesFilePath: vi.fn(() => '/test/profiles.json'), + createProfile: vi.fn(), + updateProfile: vi.fn(), + deleteProfile: vi.fn(), + testConnection: vi.fn(), + discoverModels: vi.fn(), + atomicModifyProfiles: vi.fn(async (modifier: (file: unknown) => unknown) => { + const file = await mockedLoadProfilesFile(); + const modified = modifier(file); + await mockedSaveProfilesFile(modified as never); + return modified; + }) +})); + +import { registerProfileHandlers } from './profile-handlers'; +import { ipcMain } from 'electron'; +import { IPC_CHANNELS } from '../../shared/constants'; +import { + loadProfilesFile, + saveProfilesFile, + 
validateFilePermissions, + testConnection +} from '../services/profile'; +import type { TestConnectionResult } from '@shared/types/profile'; + +// Get the handler function for testing +function getSetActiveHandler() { + const calls = (ipcMain.handle as unknown as ReturnType).mock.calls; + const setActiveCall = calls.find( + (call) => call[0] === IPC_CHANNELS.PROFILES_SET_ACTIVE + ); + return setActiveCall?.[1]; +} + +// Get the testConnection handler function for testing +function getTestConnectionHandler() { + const calls = (ipcMain.handle as unknown as ReturnType).mock.calls; + const testConnectionCall = calls.find( + (call) => call[0] === IPC_CHANNELS.PROFILES_TEST_CONNECTION + ); + return testConnectionCall?.[1]; +} + +describe('profile-handlers - setActiveProfile', () => { + beforeEach(() => { + vi.clearAllMocks(); + registerProfileHandlers(); + }); + const mockProfiles: APIProfile[] = [ + { + id: 'profile-1', + name: 'Test Profile 1', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key-1', + createdAt: Date.now(), + updatedAt: Date.now() + }, + { + id: 'profile-2', + name: 'Test Profile 2', + baseUrl: 'https://custom.api.com', + apiKey: 'sk-custom-key-2', + createdAt: Date.now(), + updatedAt: Date.now() + } + ]; + + describe('setting valid profile as active', () => { + it('should set active profile with valid profileId', async () => { + const mockFile: ProfilesFile = { + profiles: mockProfiles, + activeProfileId: null, + version: 1 + }; + + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(validateFilePermissions).mockResolvedValue(true); + + const handler = getSetActiveHandler(); + const result = await handler({}, 'profile-1'); + + expect(result).toEqual({ success: true }); + expect(saveProfilesFile).toHaveBeenCalledWith( + expect.objectContaining({ + activeProfileId: 'profile-1' + }) + ); + }); + + it('should return error for non-existent profile', async () => { + const mockFile: ProfilesFile = { + profiles: mockProfiles, + activeProfileId: null, + version: 1 + }; + + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const handler = getSetActiveHandler(); + const result = await handler({}, 'non-existent-id'); + + expect(result).toEqual({ + success: false, + error: 'Profile not found' + }); + }); + }); + + describe('switching to OAuth (null profileId)', () => { + it('should accept null profileId to switch to OAuth', async () => { + const mockFile: ProfilesFile = { + profiles: mockProfiles, + activeProfileId: 'profile-1', + version: 1 + }; + + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(validateFilePermissions).mockResolvedValue(true); + + const handler = getSetActiveHandler(); + const result = await handler({}, null); + + // Should succeed and clear activeProfileId + expect(result).toEqual({ success: true }); + expect(saveProfilesFile).toHaveBeenCalledWith( + expect.objectContaining({ + activeProfileId: null + }) + ); + }); + + it('should handle null when no profile was active', async () => { + const mockFile: ProfilesFile = { + profiles: mockProfiles, + activeProfileId: null, + version: 1 + }; + + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(validateFilePermissions).mockResolvedValue(true); + + const handler = getSetActiveHandler(); + const result = await handler({}, null); + + // Should succeed (idempotent 
operation) + expect(result).toEqual({ success: true }); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + }); + + describe('error handling', () => { + it('should handle loadProfilesFile errors', async () => { + vi.mocked(loadProfilesFile).mockRejectedValue( + new Error('Failed to load profiles') + ); + + const handler = getSetActiveHandler(); + const result = await handler({}, 'profile-1'); + + expect(result).toEqual({ + success: false, + error: 'Failed to load profiles' + }); + }); + + it('should handle saveProfilesFile errors', async () => { + const mockFile: ProfilesFile = { + profiles: mockProfiles, + activeProfileId: null, + version: 1 + }; + + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockRejectedValue( + new Error('Failed to save') + ); + + const handler = getSetActiveHandler(); + const result = await handler({}, 'profile-1'); + + expect(result).toEqual({ + success: false, + error: 'Failed to save' + }); + }); + }); +}); + +describe('profile-handlers - testConnection', () => { + beforeEach(() => { + vi.clearAllMocks(); + registerProfileHandlers(); + }); + + describe('successful connection tests', () => { + it('should return success result for valid connection', async () => { + const mockResult: TestConnectionResult = { + success: true, + message: 'Connection successful' + }; + + vi.mocked(testConnection).mockResolvedValue(mockResult); + + const handler = getTestConnectionHandler(); + const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: true, + data: mockResult + }); + expect(testConnection).toHaveBeenCalledWith( + 'https://api.anthropic.com', + 'sk-test-key-12chars', + expect.any(AbortSignal) + ); + }); + }); + + describe('input validation', () => { + it('should return error for empty baseUrl', async () => { + const handler = getTestConnectionHandler(); + const result = await handler({}, '', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + error: 'Base URL is required' + }); + expect(testConnection).not.toHaveBeenCalled(); + }); + + it('should return error for whitespace-only baseUrl', async () => { + const handler = getTestConnectionHandler(); + const result = await handler({}, ' ', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + error: 'Base URL is required' + }); + expect(testConnection).not.toHaveBeenCalled(); + }); + + it('should return error for empty apiKey', async () => { + const handler = getTestConnectionHandler(); + const result = await handler({}, 'https://api.anthropic.com', ''); + + expect(result).toEqual({ + success: false, + error: 'API key is required' + }); + expect(testConnection).not.toHaveBeenCalled(); + }); + + it('should return error for whitespace-only apiKey', async () => { + const handler = getTestConnectionHandler(); + const result = await handler({}, 'https://api.anthropic.com', ' '); + + expect(result).toEqual({ + success: false, + error: 'API key is required' + }); + expect(testConnection).not.toHaveBeenCalled(); + }); + }); + + describe('error handling', () => { + it('should return IPCResult with TestConnectionResult data for service errors', async () => { + const mockResult: TestConnectionResult = { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' 
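These tests invoke the registered handlers directly. At runtime the same handlers are reached over the profiles:test-connection channel and its cancel counterpart registered in profile-handlers.ts below; the renderer/preload side is not part of this diff, so the following request/cancel sketch is hypothetical (the window.api bridge name, channel strings, and values are assumptions for illustration only):

// Hypothetical renderer-side usage - the preload bridge (window.api) is NOT part of this diff.
const baseUrl = 'https://api.example.com';   // illustrative values only
const apiKey = 'sk-example-key';
const requestId = Date.now();                // unique id so a cancel can target this request

// Kick off the test; the handler resolves with IPCResult<TestConnectionResult>
const pending = window.api.invoke('profiles:test-connection', baseUrl, apiKey, requestId);

// To abort early (e.g. the user closed the dialog), signal the cancel channel with the same id.
// The channel string is assumed; the constant is PROFILES_TEST_CONNECTION_CANCEL below.
const cancel = () => window.api.send('profiles:test-connection-cancel', requestId);

const result = await pending;
if (!result.success) {
  console.error(result.error);          // validation error, timeout, or unexpected failure
} else if (!result.data.success) {
  console.warn(result.data.message);    // endpoint reached but credentials rejected
}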
+ }; + + vi.mocked(testConnection).mockResolvedValue(mockResult); + + const handler = getTestConnectionHandler(); + const result = await handler({}, 'https://api.anthropic.com', 'invalid-key'); + + expect(result).toEqual({ + success: true, + data: mockResult + }); + }); + + it('should return error for unexpected exceptions', async () => { + vi.mocked(testConnection).mockRejectedValue(new Error('Unexpected error')); + + const handler = getTestConnectionHandler(); + const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + error: 'Unexpected error' + }); + }); + + it('should return error for non-Error exceptions', async () => { + vi.mocked(testConnection).mockRejectedValue('String error'); + + const handler = getTestConnectionHandler(); + const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + error: 'Failed to test connection' + }); + }); + }); +}); diff --git a/apps/frontend/src/main/ipc-handlers/profile-handlers.ts b/apps/frontend/src/main/ipc-handlers/profile-handlers.ts new file mode 100644 index 0000000000..6d4cfacbb7 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/profile-handlers.ts @@ -0,0 +1,358 @@ +/** + * Profile IPC Handlers + * + * IPC handlers for API profile management: + * - profiles:get - Get all profiles + * - profiles:save - Save/create a profile + * - profiles:update - Update an existing profile + * - profiles:delete - Delete a profile + * - profiles:setActive - Set active profile + * - profiles:test-connection - Test API profile connection + */ + +import { ipcMain } from 'electron'; +import { IPC_CHANNELS } from '../../shared/constants'; +import type { IPCResult } from '../../shared/types'; +import type { APIProfile, ProfileFormData, ProfilesFile, TestConnectionResult, DiscoverModelsResult } from '@shared/types/profile'; +import { + loadProfilesFile, + saveProfilesFile, + validateFilePermissions, + getProfilesFilePath, + atomicModifyProfiles, + createProfile, + updateProfile, + deleteProfile, + testConnection, + discoverModels +} from '../services/profile'; + +// Track active test connection requests for cancellation +const activeTestConnections = new Map(); + +// Track active discover models requests for cancellation +const activeDiscoverModelsRequests = new Map(); + +/** + * Register all profile-related IPC handlers + */ +export function registerProfileHandlers(): void { + /** + * Get all profiles + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_GET, + async (): Promise> => { + try { + const profiles = await loadProfilesFile(); + return { success: true, data: profiles }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to load profiles' + }; + } + } + ); + + /** + * Save/create a profile + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_SAVE, + async ( + _, + profileData: ProfileFormData + ): Promise> => { + try { + // Use createProfile from service layer (handles validation) + const newProfile = await createProfile(profileData); + + // Set file permissions to user-readable only + await validateFilePermissions(getProfilesFilePath()).catch((err) => { + console.warn('[profile-handlers] Failed to set secure file permissions:', err); + }); + + return { success: true, data: newProfile }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to save profile' + }; + } + } + ); + + /** + * Update an existing profile + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_UPDATE, + async (_, profileData: APIProfile): Promise> => { + try { + // Use updateProfile from service layer (handles validation) + const updatedProfile = await updateProfile({ + id: profileData.id, + name: profileData.name, + baseUrl: profileData.baseUrl, + apiKey: profileData.apiKey, + models: profileData.models + }); + + // Set file permissions to user-readable only + await validateFilePermissions(getProfilesFilePath()).catch((err) => { + console.warn('[profile-handlers] Failed to set secure file permissions:', err); + }); + + return { success: true, data: updatedProfile }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to update profile' + }; + } + } + ); + + /** + * Delete a profile + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_DELETE, + async (_, profileId: string): Promise => { + try { + // Use deleteProfile from service layer (handles validation) + await deleteProfile(profileId); + + return { success: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to delete profile' + }; + } + } + ); + + /** + * Set active profile + * - If profileId is provided, set that profile as active + * - If profileId is null, clear active profile (switch to OAuth) + * Uses atomic operation to prevent race conditions + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_SET_ACTIVE, + async (_, profileId: string | null): Promise => { + try { + await atomicModifyProfiles((file) => { + // If switching to OAuth (null), clear active profile + if (profileId === null) { + file.activeProfileId = null; + return file; + } + + // Check if profile exists + const profileExists = file.profiles.some((p) => p.id === profileId); + if (!profileExists) { + throw new Error('Profile not found'); + } + + // Set active profile + file.activeProfileId = profileId; + return file; + }); + + return { success: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to set active profile' + }; + } + } + ); + + /** + * Test API profile connection + * - Tests credentials by making a minimal API request + * - Returns detailed error information for different failure types + * - Includes configurable timeout (defaults to 15 seconds) + * - Supports cancellation via PROFILES_TEST_CONNECTION_CANCEL + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_TEST_CONNECTION, + async (_event, baseUrl: string, apiKey: string, requestId: number): Promise> => { + // Create AbortController for timeout and cancellation + const controller = new AbortController(); + const timeoutMs = 15000; // 15 seconds + + // Track this request for cancellation + activeTestConnections.set(requestId, controller); + + // Set timeout to abort the request + const timeoutId = setTimeout(() => { + controller.abort(); + }, timeoutMs); + + try { + // Validate inputs (null/empty checks) + if (!baseUrl || baseUrl.trim() === '') { + clearTimeout(timeoutId); + activeTestConnections.delete(requestId); + return { + success: false, + error: 'Base URL is required' + }; + } + + if (!apiKey || apiKey.trim() === '') { + clearTimeout(timeoutId); + activeTestConnections.delete(requestId); + return { + success: false, + error: 'API key is required' + }; + } + + // Call testConnection from service layer with abort signal + const result = await testConnection(baseUrl, apiKey, controller.signal); + + // Clear timeout on success + clearTimeout(timeoutId); + activeTestConnections.delete(requestId); + + return { success: true, data: result }; + } catch (error) { + // Clear timeout on error + clearTimeout(timeoutId); + activeTestConnections.delete(requestId); + + // Handle abort errors (timeout or explicit cancellation) + if (error instanceof Error && error.name === 'AbortError') { + return { + success: false, + error: 'Connection timeout. The request took too long to complete.' + }; + } + + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to test connection' + }; + } + } + ); + + /** + * Cancel an active test connection request + */ + ipcMain.on( + IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL, + (_event, requestId: number) => { + const controller = activeTestConnections.get(requestId); + if (controller) { + controller.abort(); + activeTestConnections.delete(requestId); + } + } + ); + + /** + * Discover available models from API endpoint + * - Fetches list of models from /v1/models endpoint + * - Returns model IDs and display names for dropdown selection + * - Supports cancellation via PROFILES_DISCOVER_MODELS_CANCEL + */ + ipcMain.handle( + IPC_CHANNELS.PROFILES_DISCOVER_MODELS, + async (_event, baseUrl: string, apiKey: string, requestId: number): Promise> => { + console.log('[discoverModels] Called with:', { baseUrl, requestId }); + + // Create AbortController for timeout and cancellation + const controller = new AbortController(); + const timeoutMs = 15000; // 15 seconds + + // Track this request for cancellation + activeDiscoverModelsRequests.set(requestId, controller); + + // Set timeout to abort the request + const timeoutId = setTimeout(() => { + controller.abort(); + }, timeoutMs); + + try { + // Validate inputs (null/empty checks) + if (!baseUrl || baseUrl.trim() === '') { + clearTimeout(timeoutId); + activeDiscoverModelsRequests.delete(requestId); + return { + success: false, + error: 'Base URL is required' + }; + } + + if (!apiKey || apiKey.trim() === '') { + clearTimeout(timeoutId); + activeDiscoverModelsRequests.delete(requestId); + return { + success: false, + error: 'API key is required' + }; + } + + // Call discoverModels from service layer with abort signal + const result = await discoverModels(baseUrl, apiKey, controller.signal); + + // Clear timeout on success + clearTimeout(timeoutId); + activeDiscoverModelsRequests.delete(requestId); + + return { success: true, data: result }; + } catch (error) { + // Clear timeout on error + clearTimeout(timeoutId); + activeDiscoverModelsRequests.delete(requestId); + + // Handle abort errors (timeout or explicit cancellation) + if (error instanceof Error && error.name === 'AbortError') { + return { + success: false, + error: 'Connection timeout. The request took too long to complete.' + }; + } + + // Extract error type if available + const errorType = (error as any).errorType; + const errorMessage = error instanceof Error ? error.message : 'Failed to discover models'; + + // Log for debugging + console.error('[discoverModels] Error:', { + name: error instanceof Error ? 
error.name : 'unknown', + message: errorMessage, + errorType, + originalError: error + }); + + // Include error type in error message for UI to handle appropriately + return { + success: false, + error: errorMessage + }; + } + } + ); + + /** + * Cancel an active discover models request + */ + ipcMain.on( + IPC_CHANNELS.PROFILES_DISCOVER_MODELS_CANCEL, + (_event, requestId: number) => { + const controller = activeDiscoverModelsRequests.get(requestId); + if (controller) { + controller.abort(); + activeDiscoverModelsRequests.delete(requestId); + } + } + ); +} diff --git a/apps/frontend/src/main/ipc-handlers/project-handlers.ts b/apps/frontend/src/main/ipc-handlers/project-handlers.ts index 4ca0eb726b..d752be8d7f 100644 --- a/apps/frontend/src/main/ipc-handlers/project-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/project-handlers.ts @@ -34,16 +34,56 @@ import { getEffectiveSourcePath } from '../updater/path-resolver'; // ============================================ /** - * Get list of git branches for a directory + * Get list of git branches for a directory (both local and remote) */ function getGitBranches(projectPath: string): string[] { try { - const result = execFileSync(getToolPath('git'), ['branch', '--list', '--format=%(refname:short)'], { + // First fetch to ensure we have latest remote refs + try { + execFileSync(getToolPath('git'), ['fetch', '--prune'], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000 // 10 second timeout for fetch + }); + } catch { + // Fetch may fail if offline or no remote, continue with local refs + } + + // Get all branches (local + remote) using --all flag + const result = execFileSync(getToolPath('git'), ['branch', '--all', '--format=%(refname:short)'], { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }); - return result.trim().split('\n').filter(b => b.trim()); + + const branches = result.trim().split('\n') + .filter(b => b.trim()) + .map(b => { + // Remote branches come as "origin/branch-name", keep the full name + // but remove the "origin/" prefix for display while keeping it usable + return b.trim(); + }) + // Remove HEAD pointer entries like "origin/HEAD" + .filter(b => !b.endsWith('/HEAD')) + // Remove duplicates (local branch may exist alongside remote) + .filter((branch, index, self) => { + // If it's a remote branch (origin/x) and local version exists, keep local + if (branch.startsWith('origin/')) { + const localName = branch.replace('origin/', ''); + return !self.includes(localName); + } + return self.indexOf(branch) === index; + }); + + // Sort: local branches first, then remote branches + return branches.sort((a, b) => { + const aIsRemote = a.startsWith('origin/'); + const bIsRemote = b.startsWith('origin/'); + if (aIsRemote && !bIsRemote) return 1; + if (!aIsRemote && bIsRemote) return -1; + return a.localeCompare(b); + }); } catch { return []; } diff --git a/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts b/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts index 0eb8b3aa13..62f9faee98 100644 --- a/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts +++ b/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts @@ -96,6 +96,57 @@ function transformPhase(raw: RawRoadmapPhase): RoadmapPhase { }; } +/** + * Maps all known backend status values to canonical Kanban column statuses. + * Includes valid statuses as identity mappings for consistent lookup. + * Module-level constant for efficiency (not recreated on each call). 
+ */ +const STATUS_MAP: Record = { + // Canonical Kanban statuses (identity mappings) + 'under_review': 'under_review', + 'planned': 'planned', + 'in_progress': 'in_progress', + 'done': 'done', + // Early-stage / ideation statuses โ†’ under_review + 'idea': 'under_review', + 'backlog': 'under_review', + 'proposed': 'under_review', + 'pending': 'under_review', + // Approved / scheduled statuses โ†’ planned + 'approved': 'planned', + 'scheduled': 'planned', + // Active development statuses โ†’ in_progress + 'active': 'in_progress', + 'building': 'in_progress', + // Completed statuses โ†’ done + 'complete': 'done', + 'completed': 'done', + 'shipped': 'done' +}; + +/** + * Normalizes a feature status string to a valid Kanban column status. + * Handles case-insensitive matching and maps backend values to canonical statuses. + * + * @param status - The raw status string from the backend + * @returns A valid RoadmapFeature status for Kanban display + */ +function normalizeFeatureStatus(status: string | undefined): RoadmapFeature['status'] { + if (!status) return 'under_review'; + + const normalized = STATUS_MAP[status.toLowerCase()]; + + if (!normalized) { + // Debug log for unmapped statuses to aid future mapping additions + if (process.env.NODE_ENV === 'development') { + console.debug(`[Roadmap] normalizeFeatureStatus: unmapped status "${status}", defaulting to "under_review"`); + } + return 'under_review'; + } + + return normalized; +} + function transformFeature(raw: RawRoadmapFeature): RoadmapFeature { return { id: raw.id, @@ -107,7 +158,7 @@ function transformFeature(raw: RawRoadmapFeature): RoadmapFeature { impact: (raw.impact as RoadmapFeature['impact']) || 'medium', phaseId: raw.phase_id || raw.phaseId || '', dependencies: raw.dependencies || [], - status: (raw.status as RoadmapFeature['status']) || 'under_review', + status: normalizeFeatureStatus(raw.status), acceptanceCriteria: raw.acceptance_criteria || raw.acceptanceCriteria || [], userStories: raw.user_stories || raw.userStories || [], linkedSpecId: raw.linked_spec_id || raw.linkedSpecId, @@ -115,6 +166,7 @@ function transformFeature(raw: RawRoadmapFeature): RoadmapFeature { }; } + export function transformRoadmapFromSnakeCase( raw: RawRoadmap, projectId: string, diff --git a/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt b/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt index 5432d01173..ff5bb4bd42 100644 --- a/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt +++ b/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt @@ -304,9 +304,10 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT try { // Check if Claude CLI is available and authenticated const result = await new Promise((resolve) => { - const proc = spawn('claude', ['--version'], { + const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation(); + const proc = spawn(claudeCmd, ['--version'], { cwd: project.path, - env: { ...process.env }, + env: claudeEnv, shell: true }); @@ -325,9 +326,9 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT if (code === 0) { // Claude CLI is available, check if authenticated // Run a simple command that requires auth - const authCheck = spawn('claude', ['api', '--help'], { + const authCheck = spawn(claudeCmd, ['api', '--help'], { cwd: project.path, - env: { ...process.env }, + env: claudeEnv, shell: true }); @@ -384,9 +385,10 @@ ${existingVars['GRAPHITI_DATABASE'] ? 
`GRAPHITI_DATABASE=${existingVars['GRAPHIT try { // Run claude setup-token which will open browser for OAuth const result = await new Promise((resolve) => { - const proc = spawn('claude', ['setup-token'], { + const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation(); + const proc = spawn(claudeCmd, ['setup-token'], { cwd: project.path, - env: { ...process.env }, + env: claudeEnv, shell: true, stdio: 'inherit' // This allows the terminal to handle the interactive auth }); diff --git a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts index d6e7b94ff4..9aecfca97d 100644 --- a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts @@ -1,19 +1,21 @@ import { ipcMain, dialog, app, shell } from 'electron'; -import { existsSync, writeFileSync, mkdirSync, statSync } from 'fs'; +import { existsSync, writeFileSync, mkdirSync, statSync, readFileSync } from 'fs'; import { execFileSync } from 'node:child_process'; import path from 'path'; import { is } from '@electron-toolkit/utils'; import { IPC_CHANNELS, DEFAULT_APP_SETTINGS, DEFAULT_AGENT_PROFILES } from '../../shared/constants'; import type { AppSettings, - IPCResult + IPCResult, + SourceEnvConfig, + SourceEnvCheckResult } from '../../shared/types'; import { AgentManager } from '../agent'; import type { BrowserWindow } from 'electron'; -import { getEffectiveVersion } from '../auto-claude-updater'; -import { setUpdateChannel } from '../app-updater'; +import { setUpdateChannel, setUpdateChannelWithDowngradeCheck } from '../app-updater'; import { getSettingsPath, readSettingsFile } from '../settings-utils'; -import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform } from '../cli-tool-manager'; +import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform, preWarmToolCache } from '../cli-tool-manager'; +import { parseEnvFile } from './utils'; const settingsPath = getSettingsPath(); @@ -34,13 +36,16 @@ const detectAutoBuildSourcePath = (): string | null => { ); } else { // Production mode paths (packaged app) - // On Windows/Linux/macOS, the app might be installed anywhere - // We check common locations relative to the app bundle + // The backend is bundled as extraResources/backend + // On all platforms, it should be at process.resourcesPath/backend + possiblePaths.push( + path.resolve(process.resourcesPath, 'backend') // Primary: extraResources/backend + ); + // Fallback paths for different app structures const appPath = app.getAppPath(); possiblePaths.push( - path.resolve(appPath, '..', 'backend'), // Sibling to app - path.resolve(appPath, '..', '..', 'backend'), // Up 2 from app - path.resolve(process.resourcesPath, '..', 'backend') // Relative to resources + path.resolve(appPath, '..', 'backend'), // Sibling to asar + path.resolve(appPath, '..', '..', 'Resources', 'backend') // macOS bundle structure ); } @@ -166,6 +171,11 @@ export function registerSettingsHandlers( claudePath: settings.claudePath, }); + // Re-warm cache asynchronously after configuring (non-blocking) + preWarmToolCache(['claude']).catch((error) => { + console.warn('[SETTINGS_GET] Failed to re-warm CLI cache:', error); + }); + return { success: true, data: settings as AppSettings }; } ); @@ -207,12 +217,25 @@ export function registerSettingsHandlers( githubCLIPath: newSettings.githubCLIPath, claudePath: newSettings.claudePath, }); + + // Re-warm cache asynchronously after configuring (non-blocking) + 
preWarmToolCache(['claude']).catch((error) => { + console.warn('[SETTINGS_SAVE] Failed to re-warm CLI cache:', error); + }); } // Update auto-updater channel if betaUpdates setting changed if (settings.betaUpdates !== undefined) { - const channel = settings.betaUpdates ? 'beta' : 'latest'; - setUpdateChannel(channel); + if (settings.betaUpdates) { + // Enabling beta updates - just switch channel + setUpdateChannel('beta'); + } else { + // Disabling beta updates - switch to stable and check if downgrade is available + // This will notify the renderer if user is on a prerelease and stable version exists + setUpdateChannelWithDowngradeCheck('latest', true).catch((err) => { + console.error('[settings-handlers] Failed to check for stable downgrade:', err); + }); + } } return { success: true }; @@ -372,8 +395,8 @@ export function registerSettingsHandlers( // ============================================ ipcMain.handle(IPC_CHANNELS.APP_VERSION, async (): Promise => { - // Use effective version which accounts for source updates - const version = getEffectiveVersion(); + // Return the actual bundled version from package.json + const version = app.getVersion(); console.log('[settings-handlers] APP_VERSION returning:', version); return version; }); @@ -499,4 +522,238 @@ export function registerSettingsHandlers( } } ); + + // ============================================ + // Auto-Build Source Environment Operations + // ============================================ + + /** + * Helper to get source .env path from settings + * + * In production mode, the .env file is NOT bundled (excluded in electron-builder config). + * We store the source .env in app userData directory instead, which is writable. + * The sourcePath points to the bundled backend for reference, but envPath is in userData. 
+ */ + const getSourceEnvPath = (): { + sourcePath: string | null; + envPath: string | null; + isProduction: boolean; + } => { + const savedSettings = readSettingsFile(); + const settings = { ...DEFAULT_APP_SETTINGS, ...savedSettings }; + + // Get autoBuildPath from settings or try to auto-detect + let sourcePath: string | null = settings.autoBuildPath || null; + if (!sourcePath) { + sourcePath = detectAutoBuildSourcePath(); + } + + if (!sourcePath) { + return { sourcePath: null, envPath: null, isProduction: !is.dev }; + } + + // In production, use userData directory for .env since resources may be read-only + // In development, use the actual source path + let envPath: string; + if (is.dev) { + envPath = path.join(sourcePath, '.env'); + } else { + // Production: store .env in userData/backend/.env + const userDataBackendDir = path.join(app.getPath('userData'), 'backend'); + if (!existsSync(userDataBackendDir)) { + mkdirSync(userDataBackendDir, { recursive: true }); + } + envPath = path.join(userDataBackendDir, '.env'); + } + + return { + sourcePath, + envPath, + isProduction: !is.dev + }; + }; + + ipcMain.handle( + IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_GET, + async (): Promise> => { + try { + const { sourcePath, envPath } = getSourceEnvPath(); + + // Load global settings to check for global token fallback + const savedSettings = readSettingsFile(); + const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings }; + + if (!sourcePath) { + // Even without source path, check global token + const globalToken = globalSettings.globalClaudeOAuthToken; + return { + success: true, + data: { + hasClaudeToken: !!globalToken && globalToken.length > 0, + claudeOAuthToken: globalToken, + envExists: false + } + }; + } + + const envExists = envPath ? existsSync(envPath) : false; + let hasClaudeToken = false; + let claudeOAuthToken: string | undefined; + + // First, check source .env file + if (envExists && envPath) { + const content = readFileSync(envPath, 'utf-8'); + const vars = parseEnvFile(content); + claudeOAuthToken = vars['CLAUDE_CODE_OAUTH_TOKEN']; + hasClaudeToken = !!claudeOAuthToken && claudeOAuthToken.length > 0; + } + + // Fallback to global settings if no token in source .env + if (!hasClaudeToken && globalSettings.globalClaudeOAuthToken) { + claudeOAuthToken = globalSettings.globalClaudeOAuthToken; + hasClaudeToken = true; + } + + return { + success: true, + data: { + hasClaudeToken, + claudeOAuthToken, + sourcePath, + envExists + } + }; + } catch (error) { + // Log the error for debugging in production + console.error('[AUTOBUILD_SOURCE_ENV_GET] Error:', error); + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to get source env' + }; + } + } + ); + + ipcMain.handle( + IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE, + async (_, config: { claudeOAuthToken?: string }): Promise => { + try { + const { sourcePath, envPath } = getSourceEnvPath(); + + if (!sourcePath || !envPath) { + return { + success: false, + error: 'Auto-build source path not configured. Please set it in Settings.' 
+ }; + } + + // Read existing content or start fresh (avoiding TOCTOU race condition) + let existingVars: Record = {}; + try { + const content = readFileSync(envPath, 'utf-8'); + existingVars = parseEnvFile(content); + } catch (_readError) { + // File doesn't exist or can't be read - start with empty vars + // This is expected for first-time setup + } + + // Update with new values + if (config.claudeOAuthToken !== undefined) { + existingVars['CLAUDE_CODE_OAUTH_TOKEN'] = config.claudeOAuthToken; + } + + // Generate content + const lines: string[] = [ + '# Auto Claude Framework Environment Variables', + '# Managed by Auto Claude UI', + '', + '# Claude Code OAuth Token (REQUIRED)', + `CLAUDE_CODE_OAUTH_TOKEN=${existingVars['CLAUDE_CODE_OAUTH_TOKEN'] || ''}`, + '' + ]; + + // Preserve other existing variables + for (const [key, value] of Object.entries(existingVars)) { + if (key !== 'CLAUDE_CODE_OAUTH_TOKEN') { + lines.push(`${key}=${value}`); + } + } + + writeFileSync(envPath, lines.join('\n')); + + return { success: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to update source env' + }; + } + } + ); + + ipcMain.handle( + IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN, + async (): Promise> => { + try { + const { sourcePath, envPath, isProduction } = getSourceEnvPath(); + + // Load global settings to check for global token fallback + const savedSettings = readSettingsFile(); + const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings }; + + // Check global token first as it's the primary method + const globalToken = globalSettings.globalClaudeOAuthToken; + const hasGlobalToken = !!globalToken && globalToken.length > 0; + + if (!sourcePath) { + // In production, no source path is acceptable if global token exists + if (hasGlobalToken) { + return { + success: true, + data: { + hasToken: true, + sourcePath: isProduction ? app.getPath('userData') : undefined + } + }; + } + return { + success: true, + data: { + hasToken: false, + error: isProduction + ? 'Please configure Claude OAuth token in Settings > API Configuration' + : 'Auto-build source path not configured' + } + }; + } + + // Check source .env file + let hasEnvToken = false; + if (envPath && existsSync(envPath)) { + const content = readFileSync(envPath, 'utf-8'); + const vars = parseEnvFile(content); + const token = vars['CLAUDE_CODE_OAUTH_TOKEN']; + hasEnvToken = !!token && token.length > 0; + } + + // Token exists if either source .env has it OR global settings has it + const hasToken = hasEnvToken || hasGlobalToken; + + return { + success: true, + data: { + hasToken, + sourcePath + } + }; + } catch (error) { + // Log the error for debugging in production + console.error('[AUTOBUILD_SOURCE_ENV_CHECK_TOKEN] Error:', error); + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to check source token' + }; + } + } + ); } diff --git a/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts b/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts new file mode 100644 index 0000000000..d51ee6fbdd --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts @@ -0,0 +1,34 @@ +/** + * Shared label matching utilities + * Used by both GitHub and GitLab spec-utils for category detection + */ + +/** + * Escape special regex characters in a string. + * This ensures that terms like "c++" or "c#" are matched literally. 
+ * + * @param str - The string to escape + * @returns The escaped string safe for use in a RegExp + */ +function escapeRegExp(str: string): string { + return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); +} + +/** + * Check if a label contains a whole-word match for a term. + * Uses word boundaries to prevent false positives (e.g., 'acid' matching 'ci'). + * + * The term is escaped to handle regex metacharacters safely, so terms like + * "c++" or "c#" are matched literally rather than being interpreted as regex. + * + * @param label - The label to check (already lowercased) + * @param term - The term to search for (will be escaped for regex safety) + * @returns true if the label contains the term as a whole word + */ +export function labelMatchesWholeWord(label: string, term: string): boolean { + // Escape regex metacharacters in the term to match literally + const escapedTerm = escapeRegExp(term); + // Use word boundary regex to match whole words only + const regex = new RegExp(`\\b${escapedTerm}\\b`); + return regex.test(label); +} diff --git a/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts index 232f54bedf..50049f06e8 100644 --- a/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts @@ -194,6 +194,9 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void { updatedAt: new Date() }; + // Invalidate cache since a new task was created + projectStore.invalidateTasksCache(projectId); + return { success: true, data: task }; } ); @@ -230,6 +233,10 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void { } else { console.warn(`[TASK_DELETE] Spec directory not found: ${specDir}`); } + + // Invalidate cache since a task was deleted + projectStore.invalidateTasksCache(project.id); + return { success: true }; } catch (error) { console.error('[TASK_DELETE] Error deleting spec directory:', error); @@ -418,6 +425,9 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void { updatedAt: new Date() }; + // Invalidate cache since a task was updated + projectStore.invalidateTasksCache(project.id); + return { success: true, data: updatedTask }; } catch (error) { return { diff --git a/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts index 1e0ce9ba52..1626190f76 100644 --- a/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts @@ -2,7 +2,7 @@ import { ipcMain, BrowserWindow } from 'electron'; import { IPC_CHANNELS, AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants'; import type { IPCResult, TaskStartOptions, TaskStatus } from '../../../shared/types'; import path from 'path'; -import { existsSync, readFileSync, writeFileSync, mkdirSync, renameSync, unlinkSync } from 'fs'; +import { existsSync, readFileSync, writeFileSync, renameSync, unlinkSync } from 'fs'; import { spawnSync } from 'child_process'; import { AgentManager } from '../../agent'; import { fileWatcher } from '../../file-watcher'; @@ -12,9 +12,10 @@ import { getClaudeProfileManager } from '../../claude-profile-manager'; import { getPlanPath, persistPlanStatus, - persistPlanStatusSync, createPlanIfNotExists } from './plan-file-utils'; +import { findTaskWorktree } from '../../worktree-paths'; +import { projectStore } from '../../project-store'; /** * Atomic file write to prevent 
TOCTOU race conditions. @@ -192,7 +193,8 @@ export function registerTaskExecutionHandlers( { parallel: false, // Sequential for planning phase workers: 1, - baseBranch + baseBranch, + useWorktree: task.metadata?.useWorktree } ); } else { @@ -207,7 +209,8 @@ export function registerTaskExecutionHandlers( { parallel: false, workers: 1, - baseBranch + baseBranch, + useWorktree: task.metadata?.useWorktree } ); } @@ -236,7 +239,7 @@ export function registerTaskExecutionHandlers( setImmediate(async () => { const persistStart = Date.now(); try { - const persisted = await persistPlanStatus(planPath, 'in_progress'); + const persisted = await persistPlanStatus(planPath, 'in_progress', project.id); if (persisted) { console.warn('[TASK_START] Updated plan status to: in_progress'); } @@ -288,7 +291,7 @@ export function registerTaskExecutionHandlers( setImmediate(async () => { const persistStart = Date.now(); try { - const persisted = await persistPlanStatus(planPath, 'backlog'); + const persisted = await persistPlanStatus(planPath, 'backlog', project.id); if (persisted) { console.warn('[TASK_STOP] Updated plan status to backlog'); } @@ -332,9 +335,9 @@ export function registerTaskExecutionHandlers( ); // Check if worktree exists - QA needs to run in the worktree where the build happened - const worktreePath = path.join(project.path, '.worktrees', task.specId); - const worktreeSpecDir = path.join(worktreePath, specsBaseDir, task.specId); - const hasWorktree = existsSync(worktreePath); + const worktreePath = findTaskWorktree(project.path, task.specId); + const worktreeSpecDir = worktreePath ? path.join(worktreePath, specsBaseDir, task.specId) : null; + const hasWorktree = worktreePath !== null; if (approved) { // Write approval to QA report @@ -382,14 +385,14 @@ export function registerTaskExecutionHandlers( } // Step 3: Clean untracked files that came from the merge - // IMPORTANT: Exclude .auto-claude and .worktrees directories to preserve specs and worktree data - const cleanResult = spawnSync('git', ['clean', '-fd', '-e', '.auto-claude', '-e', '.worktrees'], { + // IMPORTANT: Exclude .auto-claude directory to preserve specs and worktree data + const cleanResult = spawnSync('git', ['clean', '-fd', '-e', '.auto-claude'], { cwd: project.path, encoding: 'utf-8', stdio: 'pipe' }); if (cleanResult.status === 0) { - console.log('[TASK_REVIEW] Cleaned untracked files in main (excluding .auto-claude and .worktrees)'); + console.log('[TASK_REVIEW] Cleaned untracked files in main (excluding .auto-claude)'); } console.log('[TASK_REVIEW] Main branch restored to pre-merge state'); @@ -397,7 +400,7 @@ export function registerTaskExecutionHandlers( // Write feedback for QA fixer - write to WORKTREE spec dir if it exists // The QA process runs in the worktree where the build and implementation_plan.json are - const targetSpecDir = hasWorktree ? worktreeSpecDir : specDir; + const targetSpecDir = hasWorktree && worktreeSpecDir ? 
worktreeSpecDir : specDir; const fixRequestPath = path.join(targetSpecDir, 'QA_FIX_REQUEST.md'); console.warn('[TASK_REVIEW] Writing QA fix request to:', fixRequestPath); @@ -453,9 +456,9 @@ export function registerTaskExecutionHandlers( // Validate status transition - 'done' can only be set through merge handler // UNLESS there's no worktree (limbo state - already merged/discarded or failed) if (status === 'done') { - // Check if worktree exists - const worktreePath = path.join(project.path, '.worktrees', taskId); - const hasWorktree = existsSync(worktreePath); + // Check if worktree exists (task.specId matches worktree folder name) + const worktreePath = findTaskWorktree(project.path, task.specId); + const hasWorktree = worktreePath !== null; if (hasWorktree) { // Worktree exists - must use merge workflow @@ -508,11 +511,13 @@ export function registerTaskExecutionHandlers( try { // Use shared utility for thread-safe plan file updates - const persisted = await persistPlanStatus(planPath, status); + const persisted = await persistPlanStatus(planPath, status, project.id); if (!persisted) { // If no implementation plan exists yet, create a basic one await createPlanIfNotExists(planPath, task, status); + // Invalidate cache after creating new plan + projectStore.invalidateTasksCache(project.id); } // Auto-stop task when status changes AWAY from 'in_progress' and process IS running @@ -585,7 +590,8 @@ export function registerTaskExecutionHandlers( { parallel: false, workers: 1, - baseBranch: baseBranchForUpdate + baseBranch: baseBranchForUpdate, + useWorktree: task.metadata?.useWorktree } ); } else { @@ -599,7 +605,8 @@ export function registerTaskExecutionHandlers( { parallel: false, workers: 1, - baseBranch: baseBranchForUpdate + baseBranch: baseBranchForUpdate, + useWorktree: task.metadata?.useWorktree } ); } @@ -671,17 +678,35 @@ export function registerTaskExecutionHandlers( return { success: false, error: 'Task not found' }; } - // Get the spec directory - const autoBuildDir = project.autoBuildPath || '.auto-claude'; - const specDir = path.join( + // Get the spec directory - use task.specsPath if available (handles worktree vs main) + // This is critical: task might exist in worktree, and getTasks() prefers worktree version. + // If we write to main project but task is in worktree, the worktree's old status takes precedence on refresh. + const specDir = task.specsPath || path.join( project.path, - autoBuildDir, - 'specs', + getSpecsDir(project.autoBuildPath), task.specId ); // Update implementation_plan.json const planPath = path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN); + console.log(`[Recovery] Writing to plan file at: ${planPath} (task location: ${task.location || 'main'})`); + + // Also update the OTHER location if task exists in both main and worktree + // This ensures consistency regardless of which version getTasks() prefers + const specsBaseDir = getSpecsDir(project.autoBuildPath); + const mainSpecDir = path.join(project.path, specsBaseDir, task.specId); + const worktreePath = findTaskWorktree(project.path, task.specId); + const worktreeSpecDir = worktreePath ? 
path.join(worktreePath, specsBaseDir, task.specId) : null; + + // Collect all plan file paths that need updating + const planPathsToUpdate: string[] = [planPath]; + if (mainSpecDir !== specDir && existsSync(path.join(mainSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN))) { + planPathsToUpdate.push(path.join(mainSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN)); + } + if (worktreeSpecDir && worktreeSpecDir !== specDir && existsSync(path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN))) { + planPathsToUpdate.push(path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN)); + } + console.log(`[Recovery] Will update ${planPathsToUpdate.length} plan file(s):`, planPathsToUpdate); try { // Read the plan to analyze subtask progress @@ -743,14 +768,25 @@ export function registerTaskExecutionHandlers( // Just update status in plan file (project store reads from file, no separate update needed) plan.status = 'human_review'; plan.planStatus = 'review'; - try { - // Use atomic write to prevent TOCTOU race conditions - atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2)); - } catch (writeError) { - console.error('[Recovery] Failed to write plan file:', writeError); + + // Write to ALL plan file locations to ensure consistency + const planContent = JSON.stringify(plan, null, 2); + let writeSucceededForComplete = false; + for (const pathToUpdate of planPathsToUpdate) { + try { + atomicWriteFileSync(pathToUpdate, planContent); + console.log(`[Recovery] Successfully wrote to: ${pathToUpdate}`); + writeSucceededForComplete = true; + } catch (writeError) { + console.error(`[Recovery] Failed to write plan file at ${pathToUpdate}:`, writeError); + // Continue trying other paths + } + } + + if (!writeSucceededForComplete) { return { success: false, - error: 'Failed to write plan file' + error: 'Failed to write plan file during recovery (all locations failed)' }; } @@ -797,11 +833,19 @@ export function registerTaskExecutionHandlers( } } - try { - // Use atomic write to prevent TOCTOU race conditions - atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2)); - } catch (writeError) { - console.error('[Recovery] Failed to write plan file:', writeError); + // Write to ALL plan file locations to ensure consistency + const planContent = JSON.stringify(plan, null, 2); + let writeSucceeded = false; + for (const pathToUpdate of planPathsToUpdate) { + try { + atomicWriteFileSync(pathToUpdate, planContent); + console.log(`[Recovery] Successfully wrote to: ${pathToUpdate}`); + writeSucceeded = true; + } catch (writeError) { + console.error(`[Recovery] Failed to write plan file at ${pathToUpdate}:`, writeError); + } + } + if (!writeSucceeded) { return { success: false, error: 'Failed to write plan file during recovery' @@ -853,17 +897,20 @@ export function registerTaskExecutionHandlers( // Set status to in_progress for the restart newStatus = 'in_progress'; - // Update plan status for restart + // Update plan status for restart - write to ALL locations if (plan) { plan.status = 'in_progress'; plan.planStatus = 'in_progress'; - try { - // Use atomic write to prevent TOCTOU race conditions - atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2)); - } catch (writeError) { - console.error('[Recovery] Failed to write plan file for restart:', writeError); - // Continue with restart attempt even if file write fails - // The plan status will be updated by the agent when it starts + const restartPlanContent = JSON.stringify(plan, null, 2); + for (const pathToUpdate of planPathsToUpdate) { + try { + 
atomicWriteFileSync(pathToUpdate, restartPlanContent); + console.log(`[Recovery] Wrote restart status to: ${pathToUpdate}`); + } catch (writeError) { + console.error(`[Recovery] Failed to write plan file for restart at ${pathToUpdate}:`, writeError); + // Continue with restart attempt even if file write fails + // The plan status will be updated by the agent when it starts + } } } @@ -896,7 +943,8 @@ export function registerTaskExecutionHandlers( { parallel: false, workers: 1, - baseBranch: baseBranchForRecovery + baseBranch: baseBranchForRecovery, + useWorktree: task.metadata?.useWorktree } ); } diff --git a/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts b/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts index 6d810f3aea..933d0c5a00 100644 --- a/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts +++ b/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts @@ -21,6 +21,7 @@ import path from 'path'; import { readFileSync, writeFileSync, mkdirSync } from 'fs'; import { AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants'; import type { TaskStatus, Project, Task } from '../../../shared/types'; +import { projectStore } from '../../project-store'; // In-memory locks for plan file operations // Key: plan file path, Value: Promise chain for serializing operations @@ -93,9 +94,10 @@ export function mapStatusToPlanStatus(status: TaskStatus): string { * * @param planPath - Path to the implementation_plan.json file * @param status - The TaskStatus to persist + * @param projectId - Optional project ID to invalidate cache (recommended for performance) * @returns true if status was persisted, false if plan file doesn't exist */ -export async function persistPlanStatus(planPath: string, status: TaskStatus): Promise { +export async function persistPlanStatus(planPath: string, status: TaskStatus, projectId?: string): Promise { return withPlanLock(planPath, async () => { try { // Read file directly without existence check to avoid TOCTOU race condition @@ -107,6 +109,12 @@ export async function persistPlanStatus(planPath: string, status: TaskStatus): P plan.updated_at = new Date().toISOString(); writeFileSync(planPath, JSON.stringify(plan, null, 2)); + + // Invalidate tasks cache since status changed + if (projectId) { + projectStore.invalidateTasksCache(projectId); + } + return true; } catch (err) { // File not found is expected - return false @@ -141,9 +149,10 @@ export async function persistPlanStatus(planPath: string, status: TaskStatus): P * * @param planPath - Path to the implementation_plan.json file * @param status - The TaskStatus to persist + * @param projectId - Optional project ID to invalidate cache (recommended for performance) * @returns true if status was persisted, false otherwise */ -export function persistPlanStatusSync(planPath: string, status: TaskStatus): boolean { +export function persistPlanStatusSync(planPath: string, status: TaskStatus, projectId?: string): boolean { try { // Read file directly without existence check to avoid TOCTOU race condition const planContent = readFileSync(planPath, 'utf-8'); @@ -154,6 +163,12 @@ export function persistPlanStatusSync(planPath: string, status: TaskStatus): boo plan.updated_at = new Date().toISOString(); writeFileSync(planPath, JSON.stringify(plan, null, 2)); + + // Invalidate tasks cache since status changed + if (projectId) { + projectStore.invalidateTasksCache(projectId); + } + return true; } catch (err) { // File not found is expected - return false diff --git 
a/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts index a9edf89c6f..8c97ae76e4 100644 --- a/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts @@ -4,14 +4,19 @@ import type { IPCResult, WorktreeStatus, WorktreeDiff, WorktreeDiffFile, Worktre import path from 'path'; import { existsSync, readdirSync, statSync, readFileSync } from 'fs'; import { execSync, execFileSync, spawn, spawnSync, exec, execFile } from 'child_process'; +import { minimatch } from 'minimatch'; import { projectStore } from '../../project-store'; import { getConfiguredPythonPath, PythonEnvManager, pythonEnvManager as pythonEnvManagerSingleton } from '../../python-env-manager'; -import { getEffectiveSourcePath } from '../../auto-claude-updater'; +import { getEffectiveSourcePath } from '../../updater/path-resolver'; import { getProfileEnv } from '../../rate-limit-detector'; import { findTaskAndProject } from './shared'; import { parsePythonCommand } from '../../python-detector'; import { getToolPath } from '../../cli-tool-manager'; import { promisify } from 'util'; +import { + getTaskWorktreeDir, + findTaskWorktree, +} from '../../worktree-paths'; /** * Read utility feature settings (for commit message, merge resolver) from settings file @@ -55,6 +60,145 @@ function getUtilitySettings(): { model: string; modelId: string; thinkingLevel: const execAsync = promisify(exec); const execFileAsync = promisify(execFile); +/** + * Check if a repository is misconfigured as bare but has source files. + * If so, automatically fix the configuration by unsetting core.bare. + * + * This can happen when git worktree operations incorrectly set bare=true, + * or when users manually misconfigure the repository. + * + * @param projectPath - Path to check and potentially fix + * @returns true if fixed, false if no fix needed or not fixable + */ +function fixMisconfiguredBareRepo(projectPath: string): boolean { + try { + // Check if bare=true is set + const bareConfig = execFileSync( + getToolPath('git'), + ['config', '--get', 'core.bare'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ).trim().toLowerCase(); + + if (bareConfig !== 'true') { + return false; // Not marked as bare, nothing to fix + } + + // Check if there are source files (indicating misconfiguration) + // A truly bare repo would only have git internals, not source code + // This covers multiple ecosystems: JS/TS, Python, Rust, Go, Java, C#, etc. + // + // Markers are separated into exact matches and glob patterns for efficiency. + // Exact matches use existsSync() directly, while glob patterns use minimatch + // against a cached directory listing. 
+ const EXACT_MARKERS = [ + // JavaScript/TypeScript ecosystem + 'package.json', 'apps', 'src', + // Python ecosystem + 'pyproject.toml', 'setup.py', 'requirements.txt', 'Pipfile', + // Rust ecosystem + 'Cargo.toml', + // Go ecosystem + 'go.mod', 'go.sum', 'cmd', 'main.go', + // Java/JVM ecosystem + 'pom.xml', 'build.gradle', 'build.gradle.kts', + // Ruby ecosystem + 'Gemfile', 'Rakefile', + // PHP ecosystem + 'composer.json', + // General project markers + 'Makefile', 'CMakeLists.txt', 'README.md', 'LICENSE' + ]; + + const GLOB_MARKERS = [ + // .NET/C# ecosystem - patterns that need glob matching + '*.csproj', '*.sln', '*.fsproj' + ]; + + // Check exact matches first (fast path) + const hasExactMatch = EXACT_MARKERS.some(marker => + existsSync(path.join(projectPath, marker)) + ); + + if (hasExactMatch) { + // Found a project marker, proceed to fix + } else { + // Check glob patterns - read directory once and cache for all patterns + let directoryFiles: string[] | null = null; + const MAX_FILES_TO_CHECK = 500; // Limit to avoid reading huge directories + + const hasGlobMatch = GLOB_MARKERS.some(pattern => { + // Validate pattern - only support simple glob patterns for security + if (pattern.includes('..') || pattern.includes('/')) { + console.warn(`[GIT] Unsupported glob pattern ignored: ${pattern}`); + return false; + } + + // Lazy-load directory listing, cached across patterns + if (directoryFiles === null) { + try { + const allFiles = readdirSync(projectPath); + // Limit to first N entries to avoid performance issues + directoryFiles = allFiles.slice(0, MAX_FILES_TO_CHECK); + if (allFiles.length > MAX_FILES_TO_CHECK) { + console.warn(`[GIT] Directory has ${allFiles.length} entries, checking only first ${MAX_FILES_TO_CHECK}`); + } + } catch (error) { + // Log the error for debugging instead of silently swallowing + console.warn(`[GIT] Failed to read directory ${projectPath}:`, error instanceof Error ? error.message : String(error)); + directoryFiles = []; + } + } + + // Use minimatch for proper glob pattern matching + return directoryFiles.some(file => minimatch(file, pattern, { nocase: true })); + }); + + if (!hasGlobMatch) { + return false; // Legitimately bare repo + } + } + + // Fix the misconfiguration + console.warn('[GIT] Detected misconfigured bare repository with source files. Auto-fixing by unsetting core.bare...'); + execFileSync( + getToolPath('git'), + ['config', '--unset', 'core.bare'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ); + console.warn('[GIT] Fixed: core.bare has been unset. Git operations should now work correctly.'); + return true; + } catch { + return false; + } +} + +/** + * Check if a path is a valid git working tree (not a bare repository). + * Returns true if the path is inside a git repository with a working tree. + * + * NOTE: This is a pure check with no side-effects. If you need to fix + * misconfigured bare repos before an operation, call fixMisconfiguredBareRepo() + * explicitly before calling this function. 
+ * + * @param projectPath - Path to check + * @returns true if it's a valid working tree, false if bare or not a git repo + */ +function isGitWorkTree(projectPath: string): boolean { + try { + // Use git rev-parse --is-inside-work-tree which returns "true" for working trees + // and fails for bare repos or non-git directories + const result = execFileSync( + getToolPath('git'), + ['rev-parse', '--is-inside-work-tree'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ); + return result.trim() === 'true'; + } catch { + // Not a working tree (could be bare repo or not a git repo at all) + return false; + } +} + /** * IDE and Terminal detection and launching utilities */ @@ -674,12 +818,14 @@ const TERMINAL_DETECTION: Partial '\'' +function escapeSingleQuotedPath(dirPath: string): string { + // Single quotes are escaped by ending the string, adding an escaped quote, + // and starting a new string: ' -> '\'' + // This pattern works in both AppleScript and POSIX shells (bash, sh, zsh) return dirPath.replace(/'/g, "'\\''"); } @@ -1069,8 +1215,8 @@ async function openInTerminal(dirPath: string, terminal: SupportedTerminal, cust if (platform === 'darwin') { // macOS: Use open command with the directory - // Escape single quotes in dirPath to prevent AppleScript injection - const escapedPath = escapeAppleScriptPath(dirPath); + // Escape single quotes in dirPath to prevent script injection + const escapedPath = escapeSingleQuotedPath(dirPath); if (terminal === 'system') { // Use AppleScript to open Terminal.app at the directory @@ -1112,7 +1258,7 @@ async function openInTerminal(dirPath: string, terminal: SupportedTerminal, cust } catch { // xterm doesn't have --working-directory, use -e with a script // Escape the path for shell use within the xterm command - const escapedPath = escapeAppleScriptPath(dirPath); + const escapedPath = escapeSingleQuotedPath(dirPath); await execFileAsync('xterm', ['-e', `cd '${escapedPath}' && bash`]); } } @@ -1158,7 +1304,7 @@ export function registerWorktreeHandlers( ): void { /** * Get the worktree status for a task - * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/ + * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/ */ ipcMain.handle( IPC_CHANNELS.TASK_WORKTREE_STATUS, @@ -1169,10 +1315,10 @@ export function registerWorktreeHandlers( return { success: false, error: 'Task not found' }; } - // Per-spec worktree path: .worktrees/{spec-name}/ - const worktreePath = path.join(project.path, '.worktrees', task.specId); + // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/ + const worktreePath = findTaskWorktree(project.path, task.specId); - if (!existsSync(worktreePath)) { + if (!worktreePath) { return { success: true, data: { exists: false } @@ -1268,7 +1414,7 @@ export function registerWorktreeHandlers( /** * Get the diff for a task's worktree - * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/ + * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/ */ ipcMain.handle( IPC_CHANNELS.TASK_WORKTREE_DIFF, @@ -1279,10 +1425,10 @@ export function registerWorktreeHandlers( return { success: false, error: 'Task not found' }; } - // Per-spec worktree path: .worktrees/{spec-name}/ - const worktreePath = path.join(project.path, '.worktrees', task.specId); + // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/ + const worktreePath = findTaskWorktree(project.path, 
task.specId); - if (!existsSync(worktreePath)) { + if (!worktreePath) { return { success: false, error: 'No worktree found for this task' }; } @@ -1400,6 +1546,12 @@ export function registerWorktreeHandlers( debug('Found task:', task.specId, 'project:', project.path); + // Auto-fix any misconfigured bare repo before merge operation + // This prevents issues where git operations fail due to incorrect bare=true config + if (fixMisconfiguredBareRepo(project.path)) { + debug('Fixed misconfigured bare repository at:', project.path); + } + // Use run.py --merge to handle the merge const sourcePath = getEffectiveSourcePath(); if (!sourcePath) { @@ -1415,8 +1567,8 @@ export function registerWorktreeHandlers( } // Check worktree exists before merge - const worktreePath = path.join(project.path, '.worktrees', task.specId); - debug('Worktree path:', worktreePath, 'exists:', existsSync(worktreePath)); + const worktreePath = findTaskWorktree(project.path, task.specId); + debug('Worktree path:', worktreePath, 'exists:', !!worktreePath); // Check if changes are already staged (for stage-only mode) if (options?.noCommit) { @@ -1443,14 +1595,18 @@ export function registerWorktreeHandlers( } } - // Get git status before merge - try { - const gitStatusBefore = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); - debug('Git status BEFORE merge in main project:\n', gitStatusBefore || '(clean)'); - const gitBranch = execFileSync(getToolPath('git'), ['branch', '--show-current'], { cwd: project.path, encoding: 'utf-8' }).trim(); - debug('Current branch:', gitBranch); - } catch (e) { - debug('Failed to get git status before:', e); + // Get git status before merge (only if project is a working tree, not a bare repo) + if (isGitWorkTree(project.path)) { + try { + const gitStatusBefore = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); + debug('Git status BEFORE merge in main project:\n', gitStatusBefore || '(clean)'); + const gitBranch = execFileSync(getToolPath('git'), ['branch', '--show-current'], { cwd: project.path, encoding: 'utf-8' }).trim(); + debug('Current branch:', gitBranch); + } catch (e) { + debug('Failed to get git status before:', e); + } + } else { + debug('Project is a bare repository - skipping pre-merge git status check'); } const args = [ @@ -1465,11 +1621,18 @@ export function registerWorktreeHandlers( args.push('--no-commit'); } - // Add --base-branch if task was created with a specific base branch + // Add --base-branch with proper priority: + // 1. Task metadata baseBranch (explicit task-level override) + // 2. Project settings mainBranch (project-level default) + // This matches the logic in execution-handlers.ts const taskBaseBranch = getTaskBaseBranch(specDir); - if (taskBaseBranch) { - args.push('--base-branch', taskBaseBranch); - debug('Using stored base branch:', taskBaseBranch); + const projectMainBranch = project.settings?.mainBranch; + const effectiveBaseBranch = taskBaseBranch || projectMainBranch; + + if (effectiveBaseBranch) { + args.push('--base-branch', effectiveBaseBranch); + debug('Using base branch:', effectiveBaseBranch, + `(source: ${taskBaseBranch ? 
'task metadata' : 'project settings'})`); } // Use configured Python path (venv if ready, otherwise bundled/system) @@ -1594,14 +1757,18 @@ export function registerWorktreeHandlers( debug('Full stdout:', stdout); debug('Full stderr:', stderr); - // Get git status after merge - try { - const gitStatusAfter = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); - debug('Git status AFTER merge in main project:\n', gitStatusAfter || '(clean)'); - const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); - debug('Staged changes:\n', gitDiffStaged || '(none)'); - } catch (e) { - debug('Failed to get git status after:', e); + // Get git status after merge (only if project is a working tree, not a bare repo) + if (isGitWorkTree(project.path)) { + try { + const gitStatusAfter = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' }); + debug('Git status AFTER merge in main project:\n', gitStatusAfter || '(clean)'); + const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); + debug('Staged changes:\n', gitDiffStaged || '(none)'); + } catch (e) { + debug('Failed to get git status after:', e); + } + } else { + debug('Project is a bare repository - skipping git status check (this is normal for worktree-based projects)'); } if (code === 0) { @@ -1613,33 +1780,39 @@ export function registerWorktreeHandlers( let mergeAlreadyCommitted = false; if (isStageOnly) { - try { - const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); - hasActualStagedChanges = gitDiffStaged.trim().length > 0; - debug('Stage-only verification: hasActualStagedChanges:', hasActualStagedChanges); - - if (!hasActualStagedChanges) { - // Check if worktree branch was already merged (merge commit exists) - const specBranch = `auto-claude/${task.specId}`; - try { - // Check if current branch contains all commits from spec branch - // git merge-base --is-ancestor returns exit code 0 if true, 1 if false - execFileSync( - 'git', - ['merge-base', '--is-ancestor', specBranch, 'HEAD'], - { cwd: project.path, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } - ); - // If we reach here, the command succeeded (exit code 0) - branch is merged - mergeAlreadyCommitted = true; - debug('Merge already committed check:', mergeAlreadyCommitted); - } catch { - // Exit code 1 means not merged, or branch may not exist - mergeAlreadyCommitted = false; - debug('Could not check merge status, assuming not merged'); + // Only check staged changes if project is a working tree (not bare repo) + if (isGitWorkTree(project.path)) { + try { + const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' }); + hasActualStagedChanges = gitDiffStaged.trim().length > 0; + debug('Stage-only verification: hasActualStagedChanges:', hasActualStagedChanges); + + if (!hasActualStagedChanges) { + // Check if worktree branch was already merged (merge commit exists) + const specBranch = `auto-claude/${task.specId}`; + try { + // Check if current branch contains all commits from spec branch + // git merge-base --is-ancestor returns exit code 0 if true, 1 if false + execFileSync( + getToolPath('git'), + ['merge-base', '--is-ancestor', specBranch, 'HEAD'], + { cwd: project.path, encoding: 'utf-8', stdio: ['pipe', 
'pipe', 'pipe'] } + ); + // If we reach here, the command succeeded (exit code 0) - branch is merged + mergeAlreadyCommitted = true; + debug('Merge already committed check:', mergeAlreadyCommitted); + } catch { + // Exit code 1 means not merged, or branch may not exist + mergeAlreadyCommitted = false; + debug('Could not check merge status, assuming not merged'); + } } + } catch (e) { + debug('Failed to verify staged changes:', e); } - } catch (e) { - debug('Failed to verify staged changes:', e); + } else { + // For bare repos, skip staging verification - merge happens in worktree + debug('Project is a bare repository - skipping staged changes verification'); } } @@ -1657,6 +1830,33 @@ export function registerWorktreeHandlers( message = 'Changes were already merged and committed. Task marked as done.'; staged = false; debug('Stage-only requested but merge already committed. Marking as done.'); + + // Clean up worktree since merge is complete (fixes #243) + // This is the same cleanup as the full merge path, needed because + // stageOnly defaults to true for human_review tasks + try { + if (worktreePath && existsSync(worktreePath)) { + execFileSync(getToolPath('git'), ['worktree', 'remove', '--force', worktreePath], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Worktree cleaned up (already merged):', worktreePath); + + // Also delete the task branch + const taskBranch = `auto-claude/${task.specId}`; + try { + execFileSync(getToolPath('git'), ['branch', '-D', taskBranch], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Task branch deleted:', taskBranch); + } catch { + // Branch might not exist or already deleted + } + } + } catch (cleanupErr) { + debug('Worktree cleanup failed (non-fatal):', cleanupErr); + } } else if (isStageOnly && !hasActualStagedChanges) { // Stage-only was requested but no changes to stage (and not committed) // This could mean nothing to merge or an error - keep in human_review for investigation @@ -1677,6 +1877,33 @@ export function registerWorktreeHandlers( planStatus = 'completed'; message = 'Changes merged successfully'; staged = false; + + // Clean up worktree after successful full merge (fixes #243) + // This allows drag-to-Done workflow since TASK_UPDATE_STATUS blocks 'done' when worktree exists + try { + if (worktreePath && existsSync(worktreePath)) { + execFileSync(getToolPath('git'), ['worktree', 'remove', '--force', worktreePath], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Worktree cleaned up after full merge:', worktreePath); + + // Also delete the task branch since we merged successfully + const taskBranch = `auto-claude/${task.specId}`; + try { + execFileSync(getToolPath('git'), ['branch', '-D', taskBranch], { + cwd: project.path, + encoding: 'utf-8' + }); + debug('Task branch deleted:', taskBranch); + } catch { + // Branch might not exist or already deleted + } + } + } catch (cleanupErr) { + debug('Worktree cleanup failed (non-fatal):', cleanupErr); + // Non-fatal - merge succeeded, cleanup can be done manually + } } debug('Merge result. isStageOnly:', isStageOnly, 'newStatus:', newStatus, 'staged:', staged); @@ -1701,10 +1928,15 @@ export function registerWorktreeHandlers( // Issue #243: We must update BOTH the main project's plan AND the worktree's plan (if it exists) // because ProjectStore prefers the worktree version when deduplicating tasks. 
// OPTIMIZATION: Use async I/O and parallel updates to prevent UI blocking - const planPaths = [ + // NOTE: The worktree has the same directory structure as main project + const planPaths: { path: string; isMain: boolean }[] = [ { path: path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: true }, - { path: path.join(worktreePath, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: false } ]; + // Add worktree plan path if worktree exists + if (worktreePath) { + const worktreeSpecDir = path.join(worktreePath, project.autoBuildPath || '.auto-claude', 'specs', task.specId); + planPaths.push({ path: path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: false }); + } const { promises: fsPromises } = require('fs'); @@ -1766,8 +1998,15 @@ export function registerWorktreeHandlers( } }; - // Run async updates without blocking the response - updatePlans().catch(err => debug('Background plan update failed:', err)); + // IMPORTANT: Wait for plan updates to complete before responding (fixes #243) + // Previously this was "fire and forget" which caused a race condition: + // resolve() would return before files were written, and UI refresh would read old status + try { + await updatePlans(); + } catch (err) { + debug('Plan update failed:', err); + // Non-fatal: UI will still update, but status may not persist across refresh + } const mainWindow = getMainWindow(); if (mainWindow) { @@ -1785,8 +2024,17 @@ export function registerWorktreeHandlers( } }); } else { - // Check if there were conflicts - const hasConflicts = stdout.includes('conflict') || stderr.includes('conflict'); + // Check if there were actual merge conflicts + // More specific patterns to avoid false positives from debug output like "files_with_conflicts: 0" + const conflictPatterns = [ + /CONFLICT \(/i, // Git merge conflict marker + /merge conflict/i, // Explicit merge conflict message + /\bconflict detected\b/i, // Our own conflict detection message + /\bconflicts? found\b/i, // "conflicts found" or "conflict found" + /Automatic merge failed/i, // Git's automatic merge failure message + ]; + const combinedOutput = stdout + stderr; + const hasConflicts = conflictPatterns.some(pattern => pattern.test(combinedOutput)); debug('Merge failed. 
hasConflicts:', hasConflicts); resolve({ @@ -1863,27 +2111,31 @@ export function registerWorktreeHandlers( } console.warn('[IPC] Found task:', task.specId, 'project:', project.name); - // Check for uncommitted changes in the main project + // Check for uncommitted changes in the main project (only if not a bare repo) let hasUncommittedChanges = false; let uncommittedFiles: string[] = []; - try { - const gitStatus = execFileSync(getToolPath('git'), ['status', '--porcelain'], { - cwd: project.path, - encoding: 'utf-8' - }); + if (isGitWorkTree(project.path)) { + try { + const gitStatus = execFileSync(getToolPath('git'), ['status', '--porcelain'], { + cwd: project.path, + encoding: 'utf-8' + }); - if (gitStatus && gitStatus.trim()) { - // Parse the status output to get file names - // Format: XY filename (where X and Y are status chars, then space, then filename) - uncommittedFiles = gitStatus - .split('\n') - .filter(line => line.trim()) - .map(line => line.substring(3).trim()); // Skip 2 status chars + 1 space, trim any trailing whitespace + if (gitStatus && gitStatus.trim()) { + // Parse the status output to get file names + // Format: XY filename (where X and Y are status chars, then space, then filename) + uncommittedFiles = gitStatus + .split('\n') + .filter(line => line.trim()) + .map(line => line.substring(3).trim()); // Skip 2 status chars + 1 space, trim any trailing whitespace - hasUncommittedChanges = uncommittedFiles.length > 0; + hasUncommittedChanges = uncommittedFiles.length > 0; + } + } catch (e) { + console.error('[IPC] Failed to check git status:', e); } - } catch (e) { - console.error('[IPC] Failed to check git status:', e); + } else { + console.warn('[IPC] Project is a bare repository - skipping uncommitted changes check'); } const sourcePath = getEffectiveSourcePath(); @@ -1901,11 +2153,18 @@ export function registerWorktreeHandlers( '--merge-preview' ]; - // Add --base-branch if task was created with a specific base branch + // Add --base-branch with proper priority: + // 1. Task metadata baseBranch (explicit task-level override) + // 2. Project settings mainBranch (project-level default) + // This matches the logic in execution-handlers.ts const taskBaseBranch = getTaskBaseBranch(specDir); - if (taskBaseBranch) { - args.push('--base-branch', taskBaseBranch); - console.warn('[IPC] Using stored base branch for preview:', taskBaseBranch); + const projectMainBranch = project.settings?.mainBranch; + const effectiveBaseBranch = taskBaseBranch || projectMainBranch; + + if (effectiveBaseBranch) { + args.push('--base-branch', effectiveBaseBranch); + console.warn('[IPC] Using base branch for preview:', effectiveBaseBranch, + `(source: ${taskBaseBranch ? 
'task metadata' : 'project settings'})`); } // Use configured Python path (venv if ready, otherwise bundled/system) @@ -2012,7 +2271,7 @@ export function registerWorktreeHandlers( /** * Discard the worktree changes - * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/ + * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/ */ ipcMain.handle( IPC_CHANNELS.TASK_WORKTREE_DISCARD, @@ -2023,10 +2282,10 @@ export function registerWorktreeHandlers( return { success: false, error: 'Task not found' }; } - // Per-spec worktree path: .worktrees/{spec-name}/ - const worktreePath = path.join(project.path, '.worktrees', task.specId); + // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/ + const worktreePath = findTaskWorktree(project.path, task.specId); - if (!existsSync(worktreePath)) { + if (!worktreePath) { return { success: true, data: { @@ -2090,7 +2349,7 @@ export function registerWorktreeHandlers( /** * List all spec worktrees for a project - * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/ + * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/ */ ipcMain.handle( IPC_CHANNELS.TASK_LIST_WORKTREES, @@ -2101,23 +2360,11 @@ export function registerWorktreeHandlers( return { success: false, error: 'Project not found' }; } - const worktreesDir = path.join(project.path, '.worktrees'); const worktrees: WorktreeListItem[] = []; + const worktreesDir = getTaskWorktreeDir(project.path); - if (!existsSync(worktreesDir)) { - return { success: true, data: { worktrees } }; - } - - // Get all directories in .worktrees - const entries = readdirSync(worktreesDir); - for (const entry of entries) { - const entryPath = path.join(worktreesDir, entry); - const stat = statSync(entryPath); - - // Skip worker directories and non-directories - if (!stat.isDirectory() || entry.startsWith('worker-')) { - continue; - } + // Helper to process a single worktree entry + const processWorktreeEntry = (entry: string, entryPath: string) => { try { // Get branch info @@ -2188,6 +2435,22 @@ export function registerWorktreeHandlers( console.error(`Error getting info for worktree ${entry}:`, gitError); // Skip this worktree if we can't get git info } + }; + + // Scan worktrees directory + if (existsSync(worktreesDir)) { + const entries = readdirSync(worktreesDir); + for (const entry of entries) { + const entryPath = path.join(worktreesDir, entry); + try { + const stat = statSync(entryPath); + if (stat.isDirectory()) { + processWorktreeEntry(entry, entryPath); + } + } catch { + // Skip entries that can't be stat'd + } + } } return { success: true, data: { worktrees } }; diff --git a/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts b/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts index b76d136314..96edd3c437 100644 --- a/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts +++ b/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts @@ -9,6 +9,7 @@ import { projectStore } from '../project-store'; import { terminalNameGenerator } from '../terminal-name-generator'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; import { escapeShellArg, escapeShellArgWindows } from '../../shared/utils/shell-escape'; +import { getClaudeCliInvocationAsync } from '../claude-cli-utils'; /** @@ -53,7 +54,10 @@ export function registerTerminalHandlers( ipcMain.on( IPC_CHANNELS.TERMINAL_INVOKE_CLAUDE, (_, id: string, cwd?: string) => { - 
terminalManager.invokeClaude(id, cwd); + // Use async version to avoid blocking main process during CLI detection + terminalManager.invokeClaudeAsync(id, cwd).catch((error) => { + console.error('[terminal-handlers] Failed to invoke Claude:', error); + }); } ); @@ -76,6 +80,22 @@ export function registerTerminalHandlers( } ); + // Set terminal title (user renamed terminal in renderer) + ipcMain.on( + IPC_CHANNELS.TERMINAL_SET_TITLE, + (_, id: string, title: string) => { + terminalManager.setTitle(id, title); + } + ); + + // Set terminal worktree config (user changed worktree association in renderer) + ipcMain.on( + IPC_CHANNELS.TERMINAL_SET_WORKTREE_CONFIG, + (_, id: string, config: import('../../shared/types').TerminalWorktreeConfig | undefined) => { + terminalManager.setWorktreeConfig(id, config); + } + ); + // Claude profile management (multi-account support) ipcMain.handle( IPC_CHANNELS.CLAUDE_PROFILES_GET, @@ -321,7 +341,15 @@ export function registerTerminalHandlers( }); // Create a new terminal for the login process - await terminalManager.create({ id: terminalId, cwd: homeDir }); + const createResult = await terminalManager.create({ id: terminalId, cwd: homeDir }); + + // If terminal creation failed, return the error + if (!createResult.success) { + return { + success: false, + error: createResult.error || 'Failed to create terminal for authentication' + }; + } // Wait a moment for the terminal to initialize await new Promise(resolve => setTimeout(resolve, 500)); @@ -329,20 +357,30 @@ export function registerTerminalHandlers( // Build the login command with the profile's config dir // Use platform-specific syntax and escaping for environment variables let loginCommand: string; + const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync(); + const pathPrefix = claudeEnv.PATH + ? (process.platform === 'win32' + ? `set "PATH=${escapeShellArgWindows(claudeEnv.PATH)}" && ` + : `export PATH=${escapeShellArg(claudeEnv.PATH)} && `) + : ''; + const shellClaudeCmd = process.platform === 'win32' + ? 
`"${escapeShellArgWindows(claudeCmd)}"` + : escapeShellArg(claudeCmd); + if (!profile.isDefault && profile.configDir) { if (process.platform === 'win32') { // SECURITY: Use Windows-specific escaping for cmd.exe const escapedConfigDir = escapeShellArgWindows(profile.configDir); // Windows cmd.exe syntax: set "VAR=value" with %VAR% for expansion - loginCommand = `set "CLAUDE_CONFIG_DIR=${escapedConfigDir}" && echo Config dir: %CLAUDE_CONFIG_DIR% && claude setup-token`; + loginCommand = `${pathPrefix}set "CLAUDE_CONFIG_DIR=${escapedConfigDir}" && echo Config dir: %CLAUDE_CONFIG_DIR% && ${shellClaudeCmd} setup-token`; } else { // SECURITY: Use POSIX escaping for bash/zsh const escapedConfigDir = escapeShellArg(profile.configDir); // Unix/Mac bash/zsh syntax: export VAR=value with $VAR for expansion - loginCommand = `export CLAUDE_CONFIG_DIR=${escapedConfigDir} && echo "Config dir: $CLAUDE_CONFIG_DIR" && claude setup-token`; + loginCommand = `${pathPrefix}export CLAUDE_CONFIG_DIR=${escapedConfigDir} && echo "Config dir: $CLAUDE_CONFIG_DIR" && ${shellClaudeCmd} setup-token`; } } else { - loginCommand = 'claude setup-token'; + loginCommand = `${pathPrefix}${shellClaudeCmd} setup-token`; } debugLog('[IPC] Sending login command to terminal:', loginCommand); @@ -350,10 +388,11 @@ export function registerTerminalHandlers( // Write the login command to the terminal terminalManager.write(terminalId, `${loginCommand}\r`); - // Notify the renderer that a login terminal was created + // Notify the renderer that an auth terminal was created + // This allows the UI to display the terminal so users can see the OAuth flow const mainWindow = getMainWindow(); if (mainWindow) { - mainWindow.webContents.send('claude-profile-login-terminal', { + mainWindow.webContents.send(IPC_CHANNELS.TERMINAL_AUTH_CREATED, { terminalId, profileId, profileName: profile.name @@ -599,7 +638,21 @@ export function registerTerminalHandlers( ipcMain.on( IPC_CHANNELS.TERMINAL_RESUME_CLAUDE, (_, id: string, sessionId?: string) => { - terminalManager.resumeClaude(id, sessionId); + // Use async version to avoid blocking main process during CLI detection + terminalManager.resumeClaudeAsync(id, sessionId).catch((error) => { + console.error('[terminal-handlers] Failed to resume Claude:', error); + }); + } + ); + + // Activate deferred Claude resume when terminal becomes active + // This is triggered by the renderer when a terminal with pendingClaudeResume becomes the active tab + ipcMain.on( + IPC_CHANNELS.TERMINAL_ACTIVATE_DEFERRED_RESUME, + (_, id: string) => { + terminalManager.activateDeferredResume(id).catch((error) => { + console.error('[terminal-handlers] Failed to activate deferred Claude resume:', error); + }); } ); diff --git a/apps/frontend/src/main/ipc-handlers/terminal/index.ts b/apps/frontend/src/main/ipc-handlers/terminal/index.ts new file mode 100644 index 0000000000..3b235fe038 --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/terminal/index.ts @@ -0,0 +1,17 @@ +/** + * Terminal handlers module + * + * This module organizes terminal worktree-related IPC handlers: + * - Worktree operations (create, list, remove) + */ + +import { registerTerminalWorktreeHandlers } from './worktree-handlers'; + +/** + * Register all terminal worktree IPC handlers + */ +export function registerTerminalWorktreeIpcHandlers(): void { + registerTerminalWorktreeHandlers(); +} + +export { registerTerminalWorktreeHandlers } from './worktree-handlers'; diff --git a/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts 
b/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts new file mode 100644 index 0000000000..ca91fd70fb --- /dev/null +++ b/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts @@ -0,0 +1,560 @@ +import { ipcMain } from 'electron'; +import { IPC_CHANNELS } from '../../../shared/constants'; +import type { + IPCResult, + CreateTerminalWorktreeRequest, + TerminalWorktreeConfig, + TerminalWorktreeResult, +} from '../../../shared/types'; +import path from 'path'; +import { existsSync, mkdirSync, writeFileSync, readFileSync, readdirSync, rmSync } from 'fs'; +import { execFileSync } from 'child_process'; +import { minimatch } from 'minimatch'; +import { debugLog, debugError } from '../../../shared/utils/debug-logger'; +import { projectStore } from '../../project-store'; +import { parseEnvFile } from '../utils'; +import { + getTerminalWorktreeDir, + getTerminalWorktreePath, + getTerminalWorktreeMetadataDir, + getTerminalWorktreeMetadataPath, +} from '../../worktree-paths'; + +// Shared validation regex for worktree names - lowercase alphanumeric with dashes/underscores +// Must start and end with alphanumeric character +const WORKTREE_NAME_REGEX = /^[a-z0-9][a-z0-9_-]*[a-z0-9]$|^[a-z0-9]$/; + +// Validation regex for git branch names - allows alphanumeric, dots, slashes, dashes, underscores +const GIT_BRANCH_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9._/-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$/; + +/** + * Fix repositories that are incorrectly marked with core.bare=true. + * This can happen when git worktree operations incorrectly set bare=true + * on a working repository that has source files. + * + * Returns true if a fix was applied, false otherwise. + */ +function fixMisconfiguredBareRepo(projectPath: string): boolean { + try { + // Check if bare=true is set + const bareConfig = execFileSync( + 'git', + ['config', '--get', 'core.bare'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ).trim().toLowerCase(); + + if (bareConfig !== 'true') { + return false; // Not marked as bare, nothing to fix + } + + // Check if there are source files (indicating misconfiguration) + // A truly bare repo would only have git internals, not source code + // This covers multiple ecosystems: JS/TS, Python, Rust, Go, Java, C#, etc. 
+ const EXACT_MARKERS = [ + // JavaScript/TypeScript ecosystem + 'package.json', 'apps', 'src', + // Python ecosystem + 'pyproject.toml', 'setup.py', 'requirements.txt', 'Pipfile', + // Rust ecosystem + 'Cargo.toml', + // Go ecosystem + 'go.mod', 'go.sum', 'cmd', 'main.go', + // Java/JVM ecosystem + 'pom.xml', 'build.gradle', 'build.gradle.kts', + // Ruby ecosystem + 'Gemfile', 'Rakefile', + // PHP ecosystem + 'composer.json', + // General project markers + 'Makefile', 'CMakeLists.txt', 'README.md', 'LICENSE' + ]; + + const GLOB_MARKERS = [ + // .NET/C# ecosystem - patterns that need glob matching + '*.csproj', '*.sln', '*.fsproj' + ]; + + // Check exact matches first (fast path) + const hasExactMatch = EXACT_MARKERS.some(marker => + existsSync(path.join(projectPath, marker)) + ); + + if (hasExactMatch) { + // Found a project marker, proceed to fix + } else { + // Check glob patterns - read directory once and cache for all patterns + let directoryFiles: string[] | null = null; + const MAX_FILES_TO_CHECK = 500; + + const hasGlobMatch = GLOB_MARKERS.some(pattern => { + // Validate pattern - only support simple glob patterns for security + if (pattern.includes('..') || pattern.includes('/')) { + debugLog('[TerminalWorktree] Unsupported glob pattern ignored:', pattern); + return false; + } + + // Lazy-load directory listing, cached across patterns + if (directoryFiles === null) { + try { + const allFiles = readdirSync(projectPath); + directoryFiles = allFiles.slice(0, MAX_FILES_TO_CHECK); + if (allFiles.length > MAX_FILES_TO_CHECK) { + debugLog(`[TerminalWorktree] Directory has ${allFiles.length} entries, checking only first ${MAX_FILES_TO_CHECK}`); + } + } catch (error) { + debugError('[TerminalWorktree] Failed to read directory:', error); + directoryFiles = []; + } + } + + // Use minimatch for proper glob pattern matching + return directoryFiles.some(file => minimatch(file, pattern, { nocase: true })); + }); + + if (!hasGlobMatch) { + return false; // Legitimately bare repo + } + } + + // Fix the misconfiguration + debugLog('[TerminalWorktree] Detected misconfigured bare repository with source files. Auto-fixing by unsetting core.bare...'); + execFileSync( + 'git', + ['config', '--unset', 'core.bare'], + { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] } + ); + debugLog('[TerminalWorktree] Fixed: core.bare has been unset. 
Git operations should now work correctly.'); + return true; + } catch { + return false; + } +} + +/** + * Validate that projectPath is a registered project + */ +function isValidProjectPath(projectPath: string): boolean { + const projects = projectStore.getProjects(); + return projects.some(p => p.path === projectPath); +} + +const MAX_TERMINAL_WORKTREES = 12; + +/** + * Get the default branch from project settings OR env config + */ +function getDefaultBranch(projectPath: string): string { + const project = projectStore.getProjects().find(p => p.path === projectPath); + if (project?.settings?.mainBranch) { + debugLog('[TerminalWorktree] Using mainBranch from project settings:', project.settings.mainBranch); + return project.settings.mainBranch; + } + + const envPath = path.join(projectPath, '.auto-claude', '.env'); + if (existsSync(envPath)) { + try { + const content = readFileSync(envPath, 'utf-8'); + const vars = parseEnvFile(content); + if (vars['DEFAULT_BRANCH']) { + debugLog('[TerminalWorktree] Using DEFAULT_BRANCH from env config:', vars['DEFAULT_BRANCH']); + return vars['DEFAULT_BRANCH']; + } + } catch (error) { + debugError('[TerminalWorktree] Error reading env file:', error); + } + } + + for (const branch of ['main', 'master']) { + try { + execFileSync('git', ['rev-parse', '--verify', branch], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Auto-detected branch:', branch); + return branch; + } catch { + // Branch doesn't exist, try next + } + } + + // Fallback to current branch - wrap in try-catch + try { + const currentBranch = execFileSync('git', ['rev-parse', '--abbrev-ref', 'HEAD'], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }).trim(); + debugLog('[TerminalWorktree] Falling back to current branch:', currentBranch); + return currentBranch; + } catch (error) { + debugError('[TerminalWorktree] Error detecting current branch:', error); + return 'main'; // Safe default + } +} + +function saveWorktreeConfig(projectPath: string, name: string, config: TerminalWorktreeConfig): void { + const metadataDir = getTerminalWorktreeMetadataDir(projectPath); + mkdirSync(metadataDir, { recursive: true }); + const metadataPath = getTerminalWorktreeMetadataPath(projectPath, name); + writeFileSync(metadataPath, JSON.stringify(config, null, 2)); +} + +function loadWorktreeConfig(projectPath: string, name: string): TerminalWorktreeConfig | null { + // Check new metadata location first + const metadataPath = getTerminalWorktreeMetadataPath(projectPath, name); + if (existsSync(metadataPath)) { + try { + return JSON.parse(readFileSync(metadataPath, 'utf-8')); + } catch (error) { + debugError('[TerminalWorktree] Corrupted config at:', metadataPath, error); + return null; + } + } + + // Backwards compatibility: check legacy location inside worktree + const legacyConfigPath = path.join(getTerminalWorktreePath(projectPath, name), 'config.json'); + if (existsSync(legacyConfigPath)) { + try { + const config = JSON.parse(readFileSync(legacyConfigPath, 'utf-8')); + // Migrate to new location + saveWorktreeConfig(projectPath, name, config); + // Clean up legacy file + try { + rmSync(legacyConfigPath); + debugLog('[TerminalWorktree] Migrated config from legacy location:', name); + } catch { + debugLog('[TerminalWorktree] Could not remove legacy config:', legacyConfigPath); + } + return config; + } catch (error) { + debugError('[TerminalWorktree] Corrupted legacy config at:', legacyConfigPath, error); + return 
null; + } + } + + return null; +} + +async function createTerminalWorktree( + request: CreateTerminalWorktreeRequest +): Promise<TerminalWorktreeResult> { + const { terminalId, name, taskId, createGitBranch, projectPath, baseBranch: customBaseBranch } = request; + + debugLog('[TerminalWorktree] Creating worktree:', { name, taskId, createGitBranch, projectPath, customBaseBranch }); + + // Validate projectPath against registered projects + if (!isValidProjectPath(projectPath)) { + return { + success: false, + error: 'Invalid project path', + }; + } + + // Validate worktree name - use shared regex (lowercase only) + if (!WORKTREE_NAME_REGEX.test(name)) { + return { + success: false, + error: 'Invalid worktree name. Use lowercase letters, numbers, dashes, and underscores. Must start and end with alphanumeric.', + }; + } + + // CRITICAL: Validate customBaseBranch to prevent command injection + if (customBaseBranch && !GIT_BRANCH_REGEX.test(customBaseBranch)) { + return { + success: false, + error: 'Invalid base branch name', + }; + } + + const existing = await listTerminalWorktrees(projectPath); + if (existing.length >= MAX_TERMINAL_WORKTREES) { + return { + success: false, + error: `Maximum of ${MAX_TERMINAL_WORKTREES} terminal worktrees reached.`, + }; + } + + // Auto-fix any misconfigured bare repo before worktree operations + // This prevents crashes when git worktree operations have incorrectly set bare=true + if (fixMisconfiguredBareRepo(projectPath)) { + debugLog('[TerminalWorktree] Fixed misconfigured bare repository at:', projectPath); + } + + const worktreePath = getTerminalWorktreePath(projectPath, name); + const branchName = `terminal/${name}`; + let directoryCreated = false; + + try { + if (existsSync(worktreePath)) { + return { success: false, error: `Worktree '${name}' already exists.` }; + } + + mkdirSync(getTerminalWorktreeDir(projectPath), { recursive: true }); + directoryCreated = true; + + // Use custom base branch if provided, otherwise detect default + const baseBranch = customBaseBranch || getDefaultBranch(projectPath); + debugLog('[TerminalWorktree] Using base branch:', baseBranch, customBaseBranch ? '(custom)' : '(default)'); + + // Check if baseBranch is already a remote ref (e.g., "origin/feature-x") + const isRemoteRef = baseBranch.startsWith('origin/'); + const remoteBranchName = isRemoteRef ?
baseBranch.replace('origin/', '') : baseBranch; + + // Fetch the branch from remote + try { + execFileSync('git', ['fetch', 'origin', remoteBranchName], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Fetched latest from origin/' + remoteBranchName); + } catch { + debugLog('[TerminalWorktree] Could not fetch from remote, continuing with local branch'); + } + + // Determine the base ref to use for worktree creation + let baseRef = baseBranch; + if (isRemoteRef) { + // Already a remote ref, use as-is + baseRef = baseBranch; + debugLog('[TerminalWorktree] Using remote ref directly:', baseRef); + } else { + // Check if remote version exists and use it for latest code + try { + execFileSync('git', ['rev-parse', '--verify', `origin/${baseBranch}`], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + baseRef = `origin/${baseBranch}`; + debugLog('[TerminalWorktree] Using remote ref:', baseRef); + } catch { + debugLog('[TerminalWorktree] Remote ref not found, using local branch:', baseBranch); + } + } + + if (createGitBranch) { + execFileSync('git', ['worktree', 'add', '-b', branchName, worktreePath, baseRef], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Created worktree with branch:', branchName, 'from', baseRef); + } else { + execFileSync('git', ['worktree', 'add', '--detach', worktreePath, baseRef], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Created worktree in detached HEAD mode from', baseRef); + } + + const config: TerminalWorktreeConfig = { + name, + worktreePath, + branchName: createGitBranch ? branchName : '', + baseBranch, + hasGitBranch: createGitBranch, + taskId, + createdAt: new Date().toISOString(), + terminalId, + }; + + saveWorktreeConfig(projectPath, name, config); + debugLog('[TerminalWorktree] Saved config for worktree:', name); + + return { success: true, config }; + } catch (error) { + debugError('[TerminalWorktree] Error creating worktree:', error); + + // Cleanup: remove the worktree directory if git worktree creation failed + if (directoryCreated && existsSync(worktreePath)) { + try { + rmSync(worktreePath, { recursive: true, force: true }); + debugLog('[TerminalWorktree] Cleaned up failed worktree directory:', worktreePath); + // Also prune stale worktree registrations in case git worktree add partially succeeded + try { + execFileSync('git', ['worktree', 'prune'], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Pruned stale worktree registrations'); + } catch { + // Ignore prune errors - not critical + } + } catch (cleanupError) { + debugError('[TerminalWorktree] Failed to cleanup worktree directory:', cleanupError); + } + } + + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to create worktree', + }; + } +} + +async function listTerminalWorktrees(projectPath: string): Promise<TerminalWorktreeConfig[]> { + // Validate projectPath against registered projects + if (!isValidProjectPath(projectPath)) { + debugError('[TerminalWorktree] Invalid project path for listing:', projectPath); + return []; + } + + const configs: TerminalWorktreeConfig[] = []; + const seenNames = new Set(); + + // Scan new metadata directory + const metadataDir = getTerminalWorktreeMetadataDir(projectPath); + if (existsSync(metadataDir)) { + try { + for (const file of readdirSync(metadataDir, { withFileTypes: true })) { + if (file.isFile() && file.name.endsWith('.json')) { + const name = file.name.replace('.json', ''); + const config = loadWorktreeConfig(projectPath, name); + if (config) { + configs.push(config); + seenNames.add(name); + } + } + } + } catch (error) { + debugError('[TerminalWorktree] Error scanning metadata dir:', error); + } + } + + // Also scan worktree directory for legacy configs (will be migrated on load) + const worktreeDir = getTerminalWorktreeDir(projectPath); + if (existsSync(worktreeDir)) { + try { + for (const dir of readdirSync(worktreeDir, { withFileTypes: true })) { + if (dir.isDirectory() && !seenNames.has(dir.name)) { + const config = loadWorktreeConfig(projectPath, dir.name); + if (config) { + configs.push(config); + } + } + } + } catch (error) { + debugError('[TerminalWorktree] Error scanning worktree dir:', error); + } + } + + return configs; +} + +async function removeTerminalWorktree( + projectPath: string, + name: string, + deleteBranch: boolean = false +): Promise<TerminalWorktreeResult> { + debugLog('[TerminalWorktree] Removing worktree:', { name, deleteBranch, projectPath }); + + // Validate projectPath against registered projects + if (!isValidProjectPath(projectPath)) { + return { success: false, error: 'Invalid project path' }; + } + + // Validate worktree name to prevent path traversal + if (!WORKTREE_NAME_REGEX.test(name)) { + return { success: false, error: 'Invalid worktree name' }; + } + + // Auto-fix any misconfigured bare repo before worktree operations + if (fixMisconfiguredBareRepo(projectPath)) { + debugLog('[TerminalWorktree] Fixed misconfigured bare repository at:', projectPath); + } + + const worktreePath = getTerminalWorktreePath(projectPath, name); + const config = loadWorktreeConfig(projectPath, name); + + if (!config) { + return { success: false, error: 'Worktree not found' }; + } + + try { + if (existsSync(worktreePath)) { + execFileSync('git', ['worktree', 'remove', '--force', worktreePath], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Removed git worktree'); + } + + if (deleteBranch && config.hasGitBranch && config.branchName) { + // Re-validate branch name from config file (defense in depth - config could be modified) + if (!GIT_BRANCH_REGEX.test(config.branchName)) { + debugError('[TerminalWorktree] Invalid branch name in config:', config.branchName); + } else { + try { + execFileSync('git', ['branch', '-D', config.branchName], { + cwd: projectPath, + encoding: 'utf-8', + stdio: ['pipe', 'pipe', 'pipe'], + }); + debugLog('[TerminalWorktree] Deleted branch:', config.branchName); + } catch { + debugLog('[TerminalWorktree] Branch not found or already deleted:', config.branchName); + } + } + } + + // Remove metadata file + const metadataPath = getTerminalWorktreeMetadataPath(projectPath, name); + if (existsSync(metadataPath)) { + try { + rmSync(metadataPath); +
debugLog('[TerminalWorktree] Removed metadata file:', metadataPath); + } catch { + debugLog('[TerminalWorktree] Could not remove metadata file:', metadataPath); + } + } + + return { success: true }; + } catch (error) { + debugError('[TerminalWorktree] Error removing worktree:', error); + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to remove worktree', + }; + } +} + +export function registerTerminalWorktreeHandlers(): void { + ipcMain.handle( + IPC_CHANNELS.TERMINAL_WORKTREE_CREATE, + async (_, request: CreateTerminalWorktreeRequest): Promise<TerminalWorktreeResult> => { + return createTerminalWorktree(request); + } + ); + + ipcMain.handle( + IPC_CHANNELS.TERMINAL_WORKTREE_LIST, + async (_, projectPath: string): Promise<IPCResult<TerminalWorktreeConfig[]>> => { + try { + const configs = await listTerminalWorktrees(projectPath); + return { success: true, data: configs }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to list worktrees', + }; + } + } + ); + + ipcMain.handle( + IPC_CHANNELS.TERMINAL_WORKTREE_REMOVE, + async ( + _, + projectPath: string, + name: string, + deleteBranch: boolean + ): Promise<TerminalWorktreeResult> => { + return removeTerminalWorktree(projectPath, name, deleteBranch); + } + ); +} diff --git a/apps/frontend/src/main/memory-env-builder.ts b/apps/frontend/src/main/memory-env-builder.ts index 804c952600..6382757d73 100644 --- a/apps/frontend/src/main/memory-env-builder.ts +++ b/apps/frontend/src/main/memory-env-builder.ts @@ -8,6 +8,7 @@ */ import type { AppSettings } from '../shared/types/settings'; +import { getMemoriesDir } from './config-paths'; /** * Build environment variables for memory/Graphiti configuration from app settings. @@ -26,6 +27,10 @@ export function buildMemoryEnvVars(settings: AppSettings): Record apps/backend path.resolve(__dirname, '..', '..', '..', 'backend', 'query_memory.py'), path.resolve(app.getAppPath(), '..', 'backend', 'query_memory.py'), @@ -112,6 +114,68 @@ function getQueryScriptPath(): string | null { return null; } +/** + * Get the backend venv Python path. + * The backend venv has real_ladybug installed (required for memory operations). + * Falls back to getConfiguredPythonPath() for packaged apps. + */ +function getBackendPythonPath(): string { + // For packaged apps, use the bundled Python which has real_ladybug in site-packages + if (app.isPackaged) { + const fallbackPython = getConfiguredPythonPath(); + console.log(`[MemoryService] Using bundled Python for packaged app: ${fallbackPython}`); + return fallbackPython; + } + + // Development mode: Find the backend venv which has real_ladybug installed + const possibleBackendPaths = [ + path.resolve(__dirname, '..', '..', '..', 'backend'), + path.resolve(app.getAppPath(), '..', 'backend'), + path.resolve(process.cwd(), 'apps', 'backend') + ]; + + for (const backendPath of possibleBackendPaths) { + // Check for backend venv Python (has real_ladybug installed) + const venvPython = process.platform === 'win32' + ? path.join(backendPath, '.venv', 'Scripts', 'python.exe') + : path.join(backendPath, '.venv', 'bin', 'python'); + + if (fs.existsSync(venvPython)) { + console.log(`[MemoryService] Using backend venv Python: ${venvPython}`); + return venvPython; + } + } + + // Fall back to configured Python path + const fallbackPython = getConfiguredPythonPath(); + console.log(`[MemoryService] Backend venv not found, falling back to: ${fallbackPython}`); + return fallbackPython; +} + +/** + * Get the Python environment variables for memory queries.
+ * This ensures real_ladybug can be found in both dev and packaged modes. + */ +function getMemoryPythonEnv(): Record<string, string> { + // Start with the standard Python environment from the manager + const baseEnv = pythonEnvManager.getPythonEnv(); + + // For packaged apps, ensure PYTHONPATH includes bundled site-packages + // even if the manager hasn't been fully initialized + if (app.isPackaged) { + const bundledSitePackages = path.join(process.resourcesPath, 'python-site-packages'); + if (fs.existsSync(bundledSitePackages)) { + // Merge paths: bundled site-packages takes precedence + const existingPath = baseEnv.PYTHONPATH || ''; + baseEnv.PYTHONPATH = existingPath + ? `${bundledSitePackages}${path.delimiter}${existingPath}` + : bundledSitePackages; + } + } + + return baseEnv; +} + /** * Execute a Python memory query command */ @@ -120,7 +184,10 @@ async function executeQuery( args: string[], timeout: number = 10000 ): Promise { - const pythonCmd = getConfiguredPythonPath(); + // Use getBackendPythonPath() to find the correct Python: + // - In dev mode: uses backend venv with real_ladybug installed + // - In packaged app: falls back to bundled Python + const pythonCmd = getBackendPythonPath(); const scriptPath = getQueryScriptPath(); if (!scriptPath) { @@ -131,9 +198,16 @@ async function executeQuery( return new Promise((resolve) => { const fullArgs = [...baseArgs, scriptPath, command, ...args]; + + // Get Python environment (includes PYTHONPATH for bundled/venv packages) + // This is critical for finding real_ladybug (LadybugDB) + const pythonEnv = getMemoryPythonEnv(); + const proc = spawn(pythonExe, fullArgs, { stdio: ['ignore', 'pipe', 'pipe'], timeout, + // Use pythonEnv which combines sanitized env + site-packages for real_ladybug + env: pythonEnv, }); let stdout = ''; @@ -148,19 +222,29 @@ async function executeQuery( }); proc.on('close', (code) => { - if (code === 0 && stdout) { + // The Python script outputs JSON to stdout (even for errors) + // Always try to parse stdout first to get the actual error message + if (stdout) { try { const result = JSON.parse(stdout); resolve(result); + return; } catch { + // JSON parsing failed + if (code !== 0) { + const errorMsg = stderr || stdout || `Process exited with code ${code}`; + console.error('[MemoryService] Python error:', errorMsg); + resolve({ success: false, error: errorMsg }); + return; + } resolve({ success: false, error: `Invalid JSON response: ${stdout}` }); + return; } - } else { - resolve({ - success: false, - error: stderr || `Process exited with code ${code}`, - }); } + // No stdout - use stderr or generic error + const errorMsg = stderr || `Process exited with code ${code}`; + console.error('[MemoryService] Python error (no stdout):', errorMsg); + resolve({ success: false, error: errorMsg }); }); proc.on('error', (err) => { @@ -183,7 +267,10 @@ async function executeSemanticQuery( embedderConfig: EmbedderConfig, timeout: number = 30000 // Longer timeout for embedding operations ): Promise { - const pythonCmd = getConfiguredPythonPath(); + // Use getBackendPythonPath() to find the correct Python: + // - In dev mode: uses backend venv with real_ladybug installed + // - In packaged app: falls back to bundled Python + const pythonCmd = getBackendPythonPath(); const scriptPath = getQueryScriptPath(); if (!scriptPath) { @@ -192,8 +279,13 @@ async function executeSemanticQuery( const [pythonExe, baseArgs] = parsePythonCommand(pythonCmd); + // Get Python environment (includes PYTHONPATH for bundled/venv packages) + // This is critical for
finding real_ladybug (LadybugDB) + const pythonEnv = getMemoryPythonEnv(); + + // Build environment with embedder configuration - const env: Record<string, string> = { ...process.env }; + // Use pythonEnv which combines sanitized env + site-packages for real_ladybug + const env: Record<string, string> = { ...pythonEnv }; // Set the embedder provider env.GRAPHITI_EMBEDDER_PROVIDER = embedderConfig.provider; @@ -272,19 +364,26 @@ async function executeSemanticQuery( }); proc.on('close', (code) => { - if (code === 0 && stdout) { + // The Python script outputs JSON to stdout (even for errors) + if (stdout) { try { const result = JSON.parse(stdout); resolve(result); + return; } catch { + if (code !== 0) { + const errorMsg = stderr || stdout || `Process exited with code ${code}`; + console.error('[MemoryService] Semantic search error:', errorMsg); + resolve({ success: false, error: errorMsg }); + return; + } resolve({ success: false, error: `Invalid JSON response: ${stdout}` }); + return; } - } else { - resolve({ - success: false, - error: stderr || `Process exited with code ${code}`, - }); } + const errorMsg = stderr || `Process exited with code ${code}`; + console.error('[MemoryService] Semantic search error (no stdout):', errorMsg); + resolve({ success: false, error: errorMsg }); }); proc.on('error', (err) => { @@ -526,6 +625,50 @@ export class MemoryService { }; } + /** + * Add an episode to the memory database + * + * This allows the Electron app to save memories (like PR review insights) + * directly to LadybugDB without going through the full Graphiti system. + * + * @param name Episode name/title + * @param content Episode content (will be JSON stringified if object) + * @param episodeType Type of episode (session_insight, pattern, gotcha, task_outcome, pr_review) + * @param groupId Optional group ID for namespacing + * @returns Promise with the created episode info + */ + async addEpisode( + name: string, + content: string | object, + episodeType: string = 'session_insight', + groupId?: string + ): Promise<{ success: boolean; id?: string; error?: string }> { + // Stringify content if it's an object + const contentStr = typeof content === 'object' ?
JSON.stringify(content) : content; + + const args = [ + this.config.dbPath, + this.config.database, + '--name', name, + '--content', contentStr, + '--type', episodeType, + ]; + + if (groupId) { + args.push('--group-id', groupId); + } + + const result = await executeQuery('add-episode', args); + + if (!result.success) { + console.error('Failed to add episode:', result.error); + return { success: false, error: result.error }; + } + + const data = result.data as { id: string; name: string; type: string; timestamp: string }; + return { success: true, id: data.id }; + } + /** * Close the database connection (no-op for subprocess model) */ diff --git a/apps/frontend/src/main/project-store.ts b/apps/frontend/src/main/project-store.ts index 5d627c0160..dedc374fed 100644 --- a/apps/frontend/src/main/project-store.ts +++ b/apps/frontend/src/main/project-store.ts @@ -5,6 +5,7 @@ import { v4 as uuidv4 } from 'uuid'; import type { Project, ProjectSettings, Task, TaskStatus, TaskMetadata, ImplementationPlan, ReviewReason, PlanSubtask } from '../shared/types'; import { DEFAULT_PROJECT_SETTINGS, AUTO_BUILD_PATHS, getSpecsDir } from '../shared/constants'; import { getAutoBuildPath, isInitialized } from './project-initializer'; +import { getTaskWorktreeDir } from './worktree-paths'; interface TabState { openProjectIds: string[]; @@ -18,12 +19,19 @@ interface StoreData { tabState?: TabState; } +interface TasksCacheEntry { + tasks: Task[]; + timestamp: number; +} + /** * Persistent storage for projects and settings */ export class ProjectStore { private storePath: string; private data: StoreData; + private tasksCache: Map<string, TasksCacheEntry> = new Map(); + private readonly CACHE_TTL_MS = 3000; // 3 seconds TTL for task cache constructor() { // Store in app's userData directory @@ -235,9 +243,19 @@ export class ProjectStore { /** * Get tasks for a project by scanning specs directory + * Implements caching with 3-second TTL to prevent excessive worktree scanning */ getTasks(projectId: string): Task[] { - console.warn('[ProjectStore] getTasks called with projectId:', projectId); + // Check cache first + const cached = this.tasksCache.get(projectId); + const now = Date.now(); + + if (cached && (now - cached.timestamp) < this.CACHE_TTL_MS) { + console.debug('[ProjectStore] Returning cached tasks for project:', projectId, '(age:', now - cached.timestamp, 'ms)'); + return cached.tasks; + } + + console.warn('[ProjectStore] getTasks called with projectId:', projectId, cached ? '(cache expired)' : '(cache miss)'); const project = this.getProject(projectId); if (!project) { console.warn('[ProjectStore] Project not found for id:', projectId); @@ -263,8 +281,7 @@ export class ProjectStore { // 2. Scan worktree specs directories // NOTE FOR MAINTAINERS: Worktree tasks are only included if the spec also exists in main. // This prevents deleted tasks from "coming back" when the worktree isn't cleaned up. - // Alternative behavior: include all worktree tasks (remove the mainSpecIds check below).
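// Illustrative sketch (assumption, not part of this patch): the "spec must also exist in main"
// guard described in the maintainers note above, written as a standalone helper with a
// hypothetical name. Worktree tasks whose spec no longer exists in the main checkout are
// dropped, so a deleted task cannot "come back" just because its worktree was never cleaned up.
const keepKnownWorktreeTasks = (mainTasks: Task[], worktreeTasks: Task[]): Task[] => {
  const mainSpecIds = new Set(mainTasks.map((t) => t.specId));
  return worktreeTasks.filter((t) => mainSpecIds.has(t.specId));
};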
- const worktreesDir = path.join(project.path, '.worktrees'); + const worktreesDir = getTaskWorktreeDir(project.path); if (existsSync(worktreesDir)) { try { const worktrees = readdirSync(worktreesDir, { withFileTypes: true }); @@ -303,9 +320,31 @@ export class ProjectStore { const tasks = Array.from(taskMap.values()); console.warn('[ProjectStore] Returning', tasks.length, 'unique tasks (after deduplication)'); + + // Update cache + this.tasksCache.set(projectId, { tasks, timestamp: now }); + return tasks; } + /** + * Invalidate the tasks cache for a specific project + * Call this when tasks are modified (created, deleted, status changed, etc.) + */ + invalidateTasksCache(projectId: string): void { + this.tasksCache.delete(projectId); + console.debug('[ProjectStore] Invalidated tasks cache for project:', projectId); + } + + /** + * Clear all tasks cache entries + * Useful for global refresh scenarios + */ + clearTasksCache(): void { + this.tasksCache.clear(); + console.debug('[ProjectStore] Cleared all tasks cache'); + } + /** * Load tasks from a specs directory (helper method for main project and worktrees) */ @@ -360,27 +399,8 @@ export class ProjectStore { const reqContent = readFileSync(requirementsPath, 'utf-8'); const requirements = JSON.parse(reqContent); if (requirements.task_description) { - // Extract a clean summary from task_description (first line or first ~200 chars) - const taskDesc = requirements.task_description; - const firstLine = taskDesc.split('\n')[0].trim(); - // If the first line is a title like "Investigate GitHub Issue #36", use the next meaningful line - if (firstLine.toLowerCase().startsWith('investigate') && taskDesc.includes('\n\n')) { - const sections = taskDesc.split('\n\n'); - // Find the first paragraph that's not a title - for (const section of sections) { - const trimmed = section.trim(); - // Skip headers and short lines - if (trimmed.startsWith('#') || trimmed.length < 20) continue; - // Skip the "Please analyze" instruction at the end - if (trimmed.startsWith('Please analyze')) continue; - description = trimmed.substring(0, 200).split('\n')[0]; - break; - } - } - // If still no description, use a shortened version of task_description - if (!description) { - description = firstLine.substring(0, 150); - } + // Use the full task description for the modal view + description = requirements.task_description; } } catch { // Ignore parse errors @@ -563,11 +583,16 @@ export class ProjectStore { // planStatus: "review" indicates spec creation is complete and awaiting user approval const isPlanReviewStage = (plan as unknown as { planStatus?: string })?.planStatus === 'review'; + // Determine if there is remaining work to do + // True if: no subtasks exist yet (planning in progress) OR some subtasks are incomplete + // This prevents 'in_progress' from overriding 'human_review' when all work is done + const hasRemainingWork = allSubtasks.length === 0 || allSubtasks.some((s) => s.status !== 'completed'); + const isStoredStatusValid = (storedStatus === calculatedStatus) || // Matches calculated - (storedStatus === 'human_review' && calculatedStatus === 'ai_review') || // Human review is more advanced than ai_review + (storedStatus === 'human_review' && (calculatedStatus === 'ai_review' || calculatedStatus === 'in_progress')) || // Human review is more advanced than ai_review or in_progress (fixes status loop bug) (storedStatus === 'human_review' && isPlanReviewStage) || // Plan review stage (awaiting spec approval) - (isActiveProcessStatus && storedStatus === 
'in_progress'); // Planning/coding phases should show as in_progress + (isActiveProcessStatus && storedStatus === 'in_progress' && hasRemainingWork); // Planning/coding phases should show as in_progress ONLY when there's remaining work if (isStoredStatusValid) { // Preserve reviewReason for human_review status @@ -643,7 +668,7 @@ export class ProjectStore { } // 2. Check worktrees - const worktreesDir = path.join(projectPath, '.worktrees'); + const worktreesDir = getTaskWorktreeDir(projectPath); if (existsSync(worktreesDir)) { try { const worktrees = readdirSync(worktreesDir, { withFileTypes: true }); @@ -721,6 +746,9 @@ export class ProjectStore { } } + // Invalidate cache since task metadata changed + this.invalidateTasksCache(projectId); + return !hasErrors; } @@ -777,6 +805,9 @@ export class ProjectStore { } } + // Invalidate cache since task metadata changed + this.invalidateTasksCache(projectId); + return !hasErrors; } } diff --git a/apps/frontend/src/main/python-env-manager.ts b/apps/frontend/src/main/python-env-manager.ts index 608ba5fda5..bd31c72f13 100644 --- a/apps/frontend/src/main/python-env-manager.ts +++ b/apps/frontend/src/main/python-env-manager.ts @@ -122,19 +122,36 @@ export class PythonEnvManager extends EventEmitter { return false; } - // Check for the marker file that indicates successful bundling - const markerPath = path.join(sitePackagesPath, '.bundled'); - if (existsSync(markerPath)) { - console.log(`[PythonEnvManager] Found bundle marker, using bundled packages`); - return true; + // Critical packages that must exist for proper functionality + // This fixes GitHub issue #416 where marker exists but packages are missing + // Note: Same list exists in download-python.cjs - keep them in sync + // This validation assumes traditional Python packages with __init__.py (not PEP 420 namespace packages) + const criticalPackages = ['claude_agent_sdk', 'dotenv']; + + // Check each package exists with valid structure (directory + __init__.py) + const missingPackages = criticalPackages.filter((pkg) => { + const pkgPath = path.join(sitePackagesPath, pkg); + const initPath = path.join(pkgPath, '__init__.py'); + // Package is valid if directory and __init__.py both exist + return !existsSync(pkgPath) || !existsSync(initPath); + }); + + // Log missing packages for debugging + for (const pkg of missingPackages) { + console.log( + `[PythonEnvManager] Missing critical package: ${pkg} at ${path.join(sitePackagesPath, pkg)}` + ); } - // Fallback: check if key packages exist - // This handles cases where the marker might be missing but packages are there - const claudeSdkPath = path.join(sitePackagesPath, 'claude_agent_sdk'); - const dotenvPath = path.join(sitePackagesPath, 'dotenv'); - if (existsSync(claudeSdkPath) || existsSync(dotenvPath)) { - console.log(`[PythonEnvManager] Found key packages, using bundled packages`); + // All packages must exist - don't rely solely on marker file + if (missingPackages.length === 0) { + // Also check marker for logging purposes + const markerPath = path.join(sitePackagesPath, '.bundled'); + if (existsSync(markerPath)) { + console.log(`[PythonEnvManager] Found bundle marker and all critical packages`); + } else { + console.log(`[PythonEnvManager] Found critical packages (marker missing)`); + } return true; } @@ -619,23 +636,40 @@ if sys.version_info >= (3, 12): /** * Get environment variables that should be set when spawning Python processes. * This ensures Python finds the bundled packages or venv packages. 
+ * + * IMPORTANT: This returns a COMPLETE environment (based on process.env) with + * problematic Python variables removed. This fixes the "Could not find platform + * independent libraries " error on Windows when PYTHONHOME is set. + * + * @see https://github.com/AndyMik90/Auto-Claude/issues/176 + */ getPythonEnv(): Record<string, string> { - const env: Record<string, string> = { + // Start with process.env but explicitly remove problematic Python variables + // PYTHONHOME causes "Could not find platform independent libraries" when set + // to a different Python installation than the one we're spawning + const baseEnv: Record<string, string> = {}; + + for (const [key, value] of Object.entries(process.env)) { + // Skip PYTHONHOME - it causes the "platform independent libraries" error + // Use case-insensitive check for Windows compatibility (env vars are case-insensitive on Windows) + // Skip undefined values (TypeScript type guard) + if (key.toUpperCase() !== 'PYTHONHOME' && value !== undefined) { + baseEnv[key] = value; + } + } + + // Apply our Python configuration on top + return { + ...baseEnv, // Don't write bytecode - not needed and avoids permission issues PYTHONDONTWRITEBYTECODE: '1', // Use UTF-8 encoding PYTHONIOENCODING: 'utf-8', // Disable user site-packages to avoid conflicts PYTHONNOUSERSITE: '1', + // Override PYTHONPATH if we have bundled packages + ...(this.sitePackagesPath ? { PYTHONPATH: this.sitePackagesPath } : {}), }; - - // Set PYTHONPATH to our site-packages - if (this.sitePackagesPath) { - env.PYTHONPATH = this.sitePackagesPath; - } - - return env; } /** diff --git a/apps/frontend/src/main/release-service.ts b/apps/frontend/src/main/release-service.ts index ed7367d5db..b05152256d 100644 --- a/apps/frontend/src/main/release-service.ts +++ b/apps/frontend/src/main/release-service.ts @@ -344,16 +344,12 @@ export class ReleaseService extends EventEmitter { tasks: Task[] ): Promise<UnmergedWorktreeInfo[]> { const unmerged: UnmergedWorktreeInfo[] = []; - - // Get worktrees directory - const worktreesDir = path.join(projectPath, '.worktrees', 'auto-claude'); + const worktreesDir = path.join(projectPath, '.auto-claude', 'worktrees', 'tasks'); if (!existsSync(worktreesDir)) { - // No worktrees exist at all - all clear return []; } - // List all spec worktrees let worktreeFolders: string[]; try { worktreeFolders = readdirSync(worktreesDir, { withFileTypes: true }) @@ -366,17 +362,16 @@ // Check each spec ID that's in this release for (const specId of releaseSpecIds) { // Find the worktree folder for this spec - // Spec IDs are like "001-feature-name", worktree folders match - const worktreeFolder = worktreeFolders.find(folder => + const matchingFolder = worktreeFolders.find(folder => folder === specId || folder.startsWith(`${specId}-`) ); - if (!worktreeFolder) { + if (!matchingFolder) { // No worktree for this spec - it's already merged/cleaned up continue; } - const worktreePath = path.join(worktreesDir, worktreeFolder); + const worktreePath = path.join(worktreesDir, matchingFolder); // Get the task info for better error messages const task = tasks.find(t => t.specId === specId); diff --git a/apps/frontend/src/main/sentry.ts b/apps/frontend/src/main/sentry.ts new file mode 100644 index 0000000000..0ab4e6602a --- /dev/null +++ b/apps/frontend/src/main/sentry.ts @@ -0,0 +1,167 @@ +/** + * Sentry Error Tracking for Main Process + * + * Initializes Sentry with: + * - beforeSend hook for mid-session toggle support (no restart needed) + * - Path masking for user privacy (shared with renderer) + * -
IPC listener for settings changes from renderer + * + * Privacy Note: + * - Usernames are masked from all file paths + * - Project paths remain visible for debugging (this is expected) + * - Tags, contexts, extra data, and user info are all sanitized + */ + +import * as Sentry from '@sentry/electron/main'; +import { app, ipcMain } from 'electron'; +import { readSettingsFile } from './settings-utils'; +import { DEFAULT_APP_SETTINGS } from '../shared/constants'; +import { IPC_CHANNELS } from '../shared/constants/ipc'; +import { + processEvent, + PRODUCTION_TRACE_SAMPLE_RATE, + type SentryErrorEvent +} from '../shared/utils/sentry-privacy'; + +// In-memory state for current setting (updated via IPC when user toggles) +let sentryEnabledState = true; + +/** + * Get Sentry DSN from environment variable + * + * For local development/testing: + * - Add SENTRY_DSN to your .env file, or + * - Run: SENTRY_DSN=your-dsn npm start + * + * For CI/CD releases: + * - Set SENTRY_DSN as a GitHub Actions secret + * + * For forks: + * - Without SENTRY_DSN, Sentry is disabled (safe for forks) + */ +function getSentryDsn(): string { + return process.env.SENTRY_DSN || ''; +} + +/** + * Get trace sample rate from environment variable + * Controls performance monitoring sampling (0.0 to 1.0) + * Default: 0.1 (10%) in production, 0 in development + */ +function getTracesSampleRate(): number { + const envValue = process.env.SENTRY_TRACES_SAMPLE_RATE; + if (envValue !== undefined) { + const parsed = parseFloat(envValue); + if (!isNaN(parsed) && parsed >= 0 && parsed <= 1) { + return parsed; + } + } + // Default: 10% in production, 0 in dev + return app.isPackaged ? PRODUCTION_TRACE_SAMPLE_RATE : 0; +} + +/** + * Get profile sample rate from environment variable + * Controls profiling sampling relative to traces (0.0 to 1.0) + * Default: 0.1 (10%) in production, 0 in development + */ +function getProfilesSampleRate(): number { + const envValue = process.env.SENTRY_PROFILES_SAMPLE_RATE; + if (envValue !== undefined) { + const parsed = parseFloat(envValue); + if (!isNaN(parsed) && parsed >= 0 && parsed <= 1) { + return parsed; + } + } + // Default: 10% in production, 0 in dev + return app.isPackaged ? PRODUCTION_TRACE_SAMPLE_RATE : 0; +} + +// Cache config so renderer can access it via IPC +let cachedDsn: string = ''; +let cachedTracesSampleRate: number = 0; +let cachedProfilesSampleRate: number = 0; + +/** + * Initialize Sentry for the main process + * Called early in app startup, before window creation + */ +export function initSentryMain(): void { + // Get configuration from environment variables + cachedDsn = getSentryDsn(); + cachedTracesSampleRate = getTracesSampleRate(); + cachedProfilesSampleRate = getProfilesSampleRate(); + + // Read initial setting from disk synchronously + const savedSettings = readSettingsFile(); + const settings = { ...DEFAULT_APP_SETTINGS, ...savedSettings }; + sentryEnabledState = settings.sentryEnabled ?? true; + + // Check if we have a DSN - if not, Sentry is effectively disabled + const hasDsn = cachedDsn.length > 0; + const shouldEnable = hasDsn && (app.isPackaged || process.env.SENTRY_DEV === 'true'); + + if (!hasDsn) { + console.log('[Sentry] No SENTRY_DSN configured - error reporting disabled'); + console.log('[Sentry] To enable: set SENTRY_DSN environment variable'); + } + + Sentry.init({ + dsn: cachedDsn, + environment: app.isPackaged ? 
'production' : 'development', + release: `auto-claude@${app.getVersion()}`, + + beforeSend(event: Sentry.ErrorEvent) { + if (!sentryEnabledState) { + return null; + } + // Process event with shared privacy utility + return processEvent(event as SentryErrorEvent) as Sentry.ErrorEvent; + }, + + // Sample rates from environment variables (default: 10% in production, 0 in dev) + tracesSampleRate: cachedTracesSampleRate, + profilesSampleRate: cachedProfilesSampleRate, + + // Only enable if we have a DSN and are in production (or SENTRY_DEV is set) + enabled: shouldEnable, + }); + + // Listen for settings changes from renderer process + ipcMain.on(IPC_CHANNELS.SENTRY_STATE_CHANGED, (_event, enabled: boolean) => { + sentryEnabledState = enabled; + console.log(`[Sentry] Error reporting ${enabled ? 'enabled' : 'disabled'} (via IPC)`); + }); + + // IPC handler for renderer to get Sentry config + ipcMain.handle(IPC_CHANNELS.GET_SENTRY_DSN, () => { + return cachedDsn; + }); + + ipcMain.handle(IPC_CHANNELS.GET_SENTRY_CONFIG, () => { + return { + dsn: cachedDsn, + tracesSampleRate: cachedTracesSampleRate, + profilesSampleRate: cachedProfilesSampleRate, + }; + }); + + if (hasDsn) { + console.log(`[Sentry] Main process initialized (enabled: ${sentryEnabledState}, traces: ${cachedTracesSampleRate}, profiles: ${cachedProfilesSampleRate})`); + } +} + +/** + * Get current Sentry enabled state + */ +export function isSentryEnabled(): boolean { + return sentryEnabledState; +} + +/** + * Set Sentry enabled state programmatically + */ +export function setSentryEnabled(enabled: boolean): void { + sentryEnabledState = enabled; + console.log(`[Sentry] Error reporting ${enabled ? 'enabled' : 'disabled'} (programmatic)`); +} diff --git a/apps/frontend/src/main/services/profile-service.test.ts b/apps/frontend/src/main/services/profile-service.test.ts new file mode 100644 index 0000000000..028e7c9bdf --- /dev/null +++ b/apps/frontend/src/main/services/profile-service.test.ts @@ -0,0 +1,1031 @@ +/** + * Tests for profile-service.ts + * + * Red phase - write failing tests first + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { + validateBaseUrl, + validateApiKey, + validateProfileNameUnique, + createProfile, + updateProfile, + getAPIProfileEnv, + testConnection +} from './profile-service'; +import type { APIProfile, ProfilesFile, TestConnectionResult } from '../../shared/types/profile'; + +// Mock profile-manager +vi.mock('../utils/profile-manager', () => ({ + loadProfilesFile: vi.fn(), + saveProfilesFile: vi.fn(), + generateProfileId: vi.fn(() => 'mock-uuid-1234') +})); + +describe('profile-service', () => { + describe('validateBaseUrl', () => { + it('should accept valid HTTPS URLs', () => { + expect(validateBaseUrl('https://api.anthropic.com')).toBe(true); + expect(validateBaseUrl('https://custom-api.example.com')).toBe(true); + expect(validateBaseUrl('https://api.example.com/v1')).toBe(true); + }); + + it('should accept valid HTTP URLs', () => { + expect(validateBaseUrl('http://localhost:8080')).toBe(true); + expect(validateBaseUrl('http://127.0.0.1:8000')).toBe(true); + }); + + it('should reject invalid URLs', () => { + expect(validateBaseUrl('not-a-url')).toBe(false); + expect(validateBaseUrl('ftp://example.com')).toBe(false); + expect(validateBaseUrl('')).toBe(false); + expect(validateBaseUrl('https://')).toBe(false); + }); + + it('should reject URLs without valid format', () => { + expect(validateBaseUrl('anthropic.com')).toBe(false); + 
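// Illustrative sketch (assumption, not the shipped implementation): one way validateBaseUrl
// could satisfy the assertions in this describe block - only http/https URLs with a host parse
// successfully, everything else is rejected. The name sketchValidateBaseUrl is hypothetical.
const sketchValidateBaseUrl = (url: string): boolean => {
  try {
    const parsed = new URL(url);
    // http/https only, and a hostname must be present (rejects 'https://' and 'ftp://...')
    return (parsed.protocol === 'http:' || parsed.protocol === 'https:') && parsed.hostname.length > 0;
  } catch {
    // Not parseable as a URL at all (e.g. 'anthropic.com', '://api.anthropic.com', '')
    return false;
  }
};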
expect(validateBaseUrl('://api.anthropic.com')).toBe(false); + }); + }); + + describe('validateApiKey', () => { + it('should accept Anthropic API key format (sk-ant-...)', () => { + expect(validateApiKey('sk-ant-api03-12345')).toBe(true); + expect(validateApiKey('sk-ant-test-key')).toBe(true); + }); + + it('should accept OpenAI API key format (sk-...)', () => { + expect(validateApiKey('sk-proj-12345')).toBe(true); + expect(validateApiKey('sk-test-key-12345')).toBe(true); + }); + + it('should accept custom API keys with reasonable length', () => { + expect(validateApiKey('custom-key-12345678')).toBe(true); + expect(validateApiKey('x-api-key-abcdefghij')).toBe(true); + }); + + it('should reject empty or too short keys', () => { + expect(validateApiKey('')).toBe(false); + expect(validateApiKey('sk-')).toBe(false); + expect(validateApiKey('abc')).toBe(false); + }); + + it('should reject keys with only whitespace', () => { + expect(validateApiKey(' ')).toBe(false); + expect(validateApiKey('\t\n')).toBe(false); + }); + }); + + describe('validateProfileNameUnique', () => { + it('should return true when name is unique', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('New Profile'); + expect(result).toBe(true); + }); + + it('should return false when name already exists', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('Existing Profile'); + expect(result).toBe(false); + }); + + it('should be case-insensitive for duplicate detection', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result1 = await validateProfileNameUnique('my profile'); + const result2 = await validateProfileNameUnique('MY PROFILE'); + expect(result1).toBe(false); + expect(result2).toBe(false); + }); + + it('should trim whitespace before checking', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique(' My Profile '); + expect(result).toBe(false); + }); + }); + + describe('createProfile', () => { + it('should create profile with valid data and save', async () => { + const mockFile: ProfilesFile = { + 
profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile, generateProfileId } = + await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(generateProfileId).mockReturnValue('generated-id-123'); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }; + + const result = await createProfile(input); + + expect(result).toMatchObject({ + id: 'generated-id-123', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }); + expect(result.createdAt).toBeGreaterThan(0); + expect(result.updatedAt).toBeGreaterThan(0); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error for invalid base URL', async () => { + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'too-short' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error for duplicate profile name', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + name: 'Existing Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + }); + + describe('updateProfile', () => { + it('should update profile name and other fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Old Name', + baseUrl: 'https://old-api.example.com', + apiKey: 'sk-old-key-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'New Name', + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-123', + models: { default: 'claude-3-5-sonnet-20241022' } + }; + + const result = await updateProfile(input); + + expect(result.name).toBe('New Name'); + expect(result.baseUrl).toBe('https://new-api.example.com'); + expect(result.apiKey).toBe('sk-new-api-key-123'); + expect(result.models).toEqual({ default: 'claude-3-5-sonnet-20241022' }); 
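// Illustrative sketch (assumption): the update semantics the next two assertions encode -
// updatedAt is refreshed to "now" while createdAt is carried over from the stored profile.
// touchProfile is a hypothetical helper name, not part of profile-service.
const touchProfile = (stored: APIProfile, changes: Partial<APIProfile>): APIProfile =>
  ({ ...stored, ...changes, createdAt: stored.createdAt, updatedAt: Date.now() });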
+ expect(result.updatedAt).toBeGreaterThan(1000000); // updatedAt should be refreshed + expect(result.createdAt).toBe(1000000); // createdAt should remain unchanged + }); + + it('should allow updating profile with same name (case-insensitive)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-old-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'my profile', // Same name, different case + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-456' + }; + + const result = await updateProfile(input); + expect(result.name).toBe('my profile'); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error when name conflicts with another profile', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: 1000000, + updatedAt: 1000000 + }, + { + id: 'profile-2', + name: 'Profile Two', + baseUrl: 'https://api2.example.com', + apiKey: 'sk-key-two-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'profile-1', + name: 'Profile Two', // Name that exists on profile-2 + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678' + }; + + await expect(updateProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + + it('should throw error for invalid base URL', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'too-short' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error when profile not found', async () => { + const mockFile: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + 
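// Illustrative sketch (assumption, not the actual implementation): the duplicate-name rule
// this test and the previous one imply - a name only conflicts when it matches a *different*
// profile id, compared case-insensitively after trimming. nameConflicts is a hypothetical name.
const nameConflicts = (profiles: APIProfile[], id: string, name: string): boolean =>
  profiles.some((p) => p.id !== id && p.name.trim().toLowerCase() === name.trim().toLowerCase());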
vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'non-existent-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Profile not found'); + }); + }); + + describe('getAPIProfileEnv', () => { + it('should return empty object when no active profile (OAuth mode)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, // No active profile = OAuth mode + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should return empty object when activeProfileId is empty string', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: '', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should return correct env vars for active profile with all fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.custom.com', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: 'claude-3-5-haiku-20241022', + sonnet: 'claude-3-5-sonnet-20241022', + opus: 'claude-3-5-opus-20241022' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.custom.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }); + }); + + it('should filter out empty string values', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: '', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: '', + sonnet: '' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Empty baseUrl should be filtered out + expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL'); + // Empty model values should be filtered out + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + // Non-empty values should be present + expect(result).toEqual({ + ANTHROPIC_AUTH_TOKEN: 
'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + }); + + it('should handle missing models object', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + // No models property + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.example.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678' + }); + expect(result).not.toHaveProperty('ANTHROPIC_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_OPUS_MODEL'); + }); + + it('should handle partial model configurations', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022' + // Only default model set + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.example.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_OPUS_MODEL'); + }); + + it('should find active profile by id when multiple profiles exist', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + }, + { + id: 'profile-2', + name: 'Profile Two', + baseUrl: 'https://api2.example.com', + apiKey: 'sk-key-two-12345678', + models: { default: 'claude-3-5-sonnet-20241022' }, + createdAt: Date.now(), + updatedAt: Date.now() + }, + { + id: 'profile-3', + name: 'Profile Three', + baseUrl: 'https://api3.example.com', + apiKey: 'sk-key-three-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-2', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api2.example.com', + ANTHROPIC_AUTH_TOKEN: 'sk-key-two-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + }); + + it('should handle profile not found (activeProfileId points to non-existent profile)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'non-existent-id', // Points to profile that 
doesn't exist + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Should return empty object gracefully + expect(result).toEqual({}); + }); + + it('should trim whitespace from values before filtering', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: ' https://api.example.com ', // Has whitespace + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Whitespace should be trimmed, not filtered out + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.example.com', // Trimmed + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678' + }); + }); + + it('should filter out whitespace-only values', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: ' ', // Whitespace only + apiKey: 'sk-test-key-12345678', + models: { + default: ' ' // Whitespace only + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('../utils/profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + // Whitespace-only values should be filtered out + expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL'); + expect(result).not.toHaveProperty('ANTHROPIC_MODEL'); + expect(result).toEqual({ + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678' + }); + }); + }); + + describe('testConnection', () => { + beforeEach(() => { + // Mock fetch globally for testConnection tests + global.fetch = vi.fn(); + }); + + it('should return success for valid credentials (200 response)', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: true, + status: 200, + json: async () => ({ data: [] }) + } as Response); + + const result = await testConnection('https://api.anthropic.com', 'sk-ant-test-key-12'); + + expect(result).toEqual({ + success: true, + message: 'Connection successful' + }); + expect(global.fetch).toHaveBeenCalledWith( + 'https://api.anthropic.com/v1/models', + expect.objectContaining({ + method: 'GET', + headers: expect.objectContaining({ + 'x-api-key': 'sk-ant-test-key-12', + 'anthropic-version': '2023-06-01' + }) + }) + ); + }); + + it('should return auth error for invalid API key (401 response)', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: false, + status: 401, + statusText: 'Unauthorized' + } as Response); + + const result = await testConnection('https://api.anthropic.com', 'sk-invalid-key-12'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }); + }); + + it('should return auth error for 403 response', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: false, + status: 403, + statusText: 'Forbidden' + } as Response); + + const result = await testConnection('https://api.anthropic.com', 'sk-forbidden-key'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' 
+ }); + }); + + it('should return endpoint error for invalid URL (404 response)', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: false, + status: 404, + statusText: 'Not Found' + } as Response); + + const result = await testConnection('https://invalid.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' + }); + }); + + it('should return network error for connection refused', async () => { + const networkError = new TypeError('Failed to fetch'); + (networkError as any).code = 'ECONNREFUSED'; + + vi.mocked(global.fetch).mockRejectedValue(networkError); + + const result = await testConnection('https://unreachable.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }); + }); + + it('should return network error for ENOTFOUND (DNS failure)', async () => { + const dnsError = new TypeError('Failed to fetch'); + (dnsError as any).code = 'ENOTFOUND'; + + vi.mocked(global.fetch).mockRejectedValue(dnsError); + + const result = await testConnection('https://nosuchdomain.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }); + }); + + it('should return timeout error for AbortError', async () => { + const abortError = new Error('Aborted'); + abortError.name = 'AbortError'; + + vi.mocked(global.fetch).mockRejectedValue(abortError); + + const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + }); + + it('should return unknown error for other failures', async () => { + vi.mocked(global.fetch).mockRejectedValue(new Error('Unknown error')); + + const result = await testConnection('https://api.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }); + }); + + it('should auto-prepend https:// if missing', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: true, + status: 200, + json: async () => ({ data: [] }) + } as Response); + + await testConnection('api.anthropic.com', 'sk-test-key-12chars'); + + expect(global.fetch).toHaveBeenCalledWith( + 'https://api.anthropic.com/v1/models', + expect.any(Object) + ); + }); + + it('should remove trailing slash from baseUrl', async () => { + vi.mocked(global.fetch).mockResolvedValue({ + ok: true, + status: 200, + json: async () => ({ data: [] }) + } as Response); + + await testConnection('https://api.anthropic.com/', 'sk-test-key-12chars'); + + expect(global.fetch).toHaveBeenCalledWith( + 'https://api.anthropic.com/v1/models', + expect.any(Object) + ); + }); + + it('should return error for empty baseUrl', async () => { + const result = await testConnection('', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' 
+ }); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it('should return error for invalid baseUrl format', async () => { + const result = await testConnection('ftp://invalid-protocol.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' + }); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it('should return error for invalid API key format', async () => { + const result = await testConnection('https://api.anthropic.com', 'short'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }); + expect(global.fetch).not.toHaveBeenCalled(); + }); + + it('should abort when signal is triggered', async () => { + const abortController = new AbortController(); + const abortError = new Error('Aborted'); + abortError.name = 'AbortError'; + + vi.mocked(global.fetch).mockRejectedValue(abortError); + + // Abort immediately + abortController.abort(); + + const result = await testConnection('https://api.anthropic.com', 'sk-test-key-12chars', abortController.signal); + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + }); + + it('should set 10 second timeout', async () => { + vi.mocked(global.fetch).mockImplementation(() => + new Promise((_, reject) => { + setTimeout(() => { + const abortError = new Error('Aborted'); + abortError.name = 'AbortError'; + reject(abortError); + }, 100); // Short delay for test + }) + ); + + const startTime = Date.now(); + const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars'); + const elapsed = Date.now() - startTime; + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + // Should timeout at 10 seconds, but we use a mock for faster test + expect(elapsed).toBeLessThan(5000); // Well under 10s due to mock + }); + }); +}); diff --git a/apps/frontend/src/main/services/profile-service.ts b/apps/frontend/src/main/services/profile-service.ts new file mode 100644 index 0000000000..a58651ac56 --- /dev/null +++ b/apps/frontend/src/main/services/profile-service.ts @@ -0,0 +1,510 @@ +/** + * Profile Service - Validation and profile creation + * + * Provides validation functions for URL, API key, and profile name uniqueness. + * Handles creating new profiles with validation. + */ + +import { loadProfilesFile, saveProfilesFile, generateProfileId } from '../utils/profile-manager'; +import type { APIProfile, TestConnectionResult } from '../../shared/types/profile'; + +/** + * Validate base URL format + * Accepts HTTP(S) URLs with valid endpoints + */ +export function validateBaseUrl(baseUrl: string): boolean { + if (!baseUrl || baseUrl.trim() === '') { + return false; + } + + try { + const url = new URL(baseUrl); + // Only allow http and https protocols + return url.protocol === 'http:' || url.protocol === 'https:'; + } catch { + return false; + } +} + +/** + * Validate API key format + * Accepts various API key formats (Anthropic, OpenAI, custom) + */ +export function validateApiKey(apiKey: string): boolean { + if (!apiKey || apiKey.trim() === '') { + return false; + } + + const trimmed = apiKey.trim(); + + // Too short to be a real API key + if (trimmed.length < 12) { + return false; + } + + // Accept common API key formats + // Anthropic: sk-ant-... + // OpenAI: sk-proj-... or sk-... 
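+  //   (illustrative values only: e.g. "sk-ant-api03-xxxxxxxxxxxx" or "sk-proj-xxxxxxxxxxxx"
+  //   would satisfy the length and character checks below)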
+  // Custom: any reasonable length key with alphanumeric chars
+  const hasValidChars = /^[a-zA-Z0-9\-_+.]+$/.test(trimmed);
+
+  return hasValidChars;
+}
+
+/**
+ * Validate that profile name is unique (case-insensitive, trimmed)
+ */
+export async function validateProfileNameUnique(name: string): Promise<boolean> {
+  const trimmed = name.trim().toLowerCase();
+
+  const file = await loadProfilesFile();
+
+  // Check if any profile has the same name (case-insensitive)
+  const exists = file.profiles.some(
+    (p) => p.name.trim().toLowerCase() === trimmed
+  );
+
+  return !exists;
+}
+
+/**
+ * Input type for creating a profile (without id, createdAt, updatedAt)
+ */
+export type CreateProfileInput = Omit<APIProfile, 'id' | 'createdAt' | 'updatedAt'>;
+
+/**
+ * Input type for updating a profile (with id, without createdAt, updatedAt)
+ */
+export type UpdateProfileInput = Pick<APIProfile, 'id'> & CreateProfileInput;
+
+/**
+ * Delete a profile with validation
+ * Throws errors for validation failures
+ */
+export async function deleteProfile(id: string): Promise<void> {
+  const file = await loadProfilesFile();
+
+  // Find the profile
+  const profileIndex = file.profiles.findIndex((p) => p.id === id);
+  if (profileIndex === -1) {
+    throw new Error('Profile not found');
+  }
+
+  const profile = file.profiles[profileIndex];
+
+  // Active Profile Check: Cannot delete active profile (AC3)
+  if (file.activeProfileId === id) {
+    throw new Error('Cannot delete active profile. Please switch to another profile or OAuth first.');
+  }
+
+  // Remove profile
+  file.profiles.splice(profileIndex, 1);
+
+  // Last Profile Fallback: If no profiles remain, set activeProfileId to null (AC4)
+  if (file.profiles.length === 0) {
+    file.activeProfileId = null;
+  }
+
+  // Save to disk
+  await saveProfilesFile(file);
+}
+
+/**
+ * Create a new profile with validation
+ * Throws errors for validation failures
+ */
+export async function createProfile(input: CreateProfileInput): Promise<APIProfile> {
+  // Validate base URL
+  if (!validateBaseUrl(input.baseUrl)) {
+    throw new Error('Invalid base URL');
+  }
+
+  // Validate API key
+  if (!validateApiKey(input.apiKey)) {
+    throw new Error('Invalid API key');
+  }
+
+  // Validate profile name uniqueness
+  const isUnique = await validateProfileNameUnique(input.name);
+  if (!isUnique) {
+    throw new Error('A profile with this name already exists');
+  }
+
+  // Load existing profiles
+  const file = await loadProfilesFile();
+
+  // Create new profile
+  const now = Date.now();
+  const newProfile: APIProfile = {
+    id: generateProfileId(),
+    name: input.name.trim(),
+    baseUrl: input.baseUrl.trim(),
+    apiKey: input.apiKey.trim(),
+    models: input.models,
+    createdAt: now,
+    updatedAt: now
+  };
+
+  // Add to profiles list
+  file.profiles.push(newProfile);
+
+  // Set as active if it's the first profile
+  if (file.profiles.length === 1) {
+    file.activeProfileId = newProfile.id;
+  }
+
+  // Save to disk
+  await saveProfilesFile(file);
+
+  return newProfile;
+}
+
+/**
+ * Update an existing profile with validation
+ * Throws errors for validation failures
+ */
+export async function updateProfile(input: UpdateProfileInput): Promise<APIProfile> {
+  // Validate base URL
+  if (!validateBaseUrl(input.baseUrl)) {
+    throw new Error('Invalid base URL');
+  }
+
+  // Validate API key
+  if (!validateApiKey(input.apiKey)) {
+    throw new Error('Invalid API key');
+  }
+
+  // Load existing profiles
+  const file = await loadProfilesFile();
+
+  // Find the profile
+  const profileIndex = file.profiles.findIndex((p) => p.id === input.id);
+  if (profileIndex === -1) {
+    throw new Error('Profile not found');
+  }
+
+  const existingProfile = file.profiles[profileIndex];
+
+  // Validate profile name uniqueness (exclude current profile from check)
+  if (input.name.trim().toLowerCase() !== existingProfile.name.trim().toLowerCase()) {
+    const trimmed = input.name.trim().toLowerCase();
+    const nameExists = file.profiles.some(
+      (p) => p.id !== input.id && p.name.trim().toLowerCase() === trimmed
+    );
+    if (nameExists) {
+      throw new Error('A profile with this name already exists');
+    }
+  }
+
+  // Update profile (including name)
+  const updatedProfile: APIProfile = {
+    ...existingProfile,
+    name: input.name.trim(),
+    baseUrl: input.baseUrl.trim(),
+    apiKey: input.apiKey.trim(),
+    models: input.models,
+    updatedAt: Date.now()
+  };
+
+  // Replace in profiles list
+  file.profiles[profileIndex] = updatedProfile;
+
+  // Save to disk
+  await saveProfilesFile(file);
+
+  return updatedProfile;
+}
+
+/**
+ * Get environment variables for the active API profile
+ *
+ * Maps the active API profile to SDK environment variables for injection
+ * into the Python subprocess. Returns an empty object when no profile is active
+ * (OAuth mode), allowing CLAUDE_CODE_OAUTH_TOKEN to be used instead.
+ *
+ * Environment Variable Mapping:
+ * - profile.baseUrl → ANTHROPIC_BASE_URL
+ * - profile.apiKey → ANTHROPIC_AUTH_TOKEN
+ * - profile.models.default → ANTHROPIC_MODEL
+ * - profile.models.haiku → ANTHROPIC_DEFAULT_HAIKU_MODEL
+ * - profile.models.sonnet → ANTHROPIC_DEFAULT_SONNET_MODEL
+ * - profile.models.opus → ANTHROPIC_DEFAULT_OPUS_MODEL
+ *
+ * Empty string values are filtered out (not set as env vars).
+ *
+ * @returns Promise<Record<string, string>> Environment variables for active profile
+ */
+export async function getAPIProfileEnv(): Promise<Record<string, string>> {
+  // Load profiles.json
+  const file = await loadProfilesFile();
+
+  // If no active profile (null/empty), return empty object (OAuth mode)
+  if (!file.activeProfileId || file.activeProfileId === '') {
+    return {};
+  }
+
+  // Find active profile by activeProfileId
+  const profile = file.profiles.find((p) => p.id === file.activeProfileId);
+
+  // If profile not found, return empty object (shouldn't happen with valid data)
+  if (!profile) {
+    return {};
+  }
+
+  // Map profile fields to SDK env vars
+  const envVars: Record<string, string> = {
+    ANTHROPIC_BASE_URL: profile.baseUrl || '',
+    ANTHROPIC_AUTH_TOKEN: profile.apiKey || '',
+    ANTHROPIC_MODEL: profile.models?.default || '',
+    ANTHROPIC_DEFAULT_HAIKU_MODEL: profile.models?.haiku || '',
+    ANTHROPIC_DEFAULT_SONNET_MODEL: profile.models?.sonnet || '',
+    ANTHROPIC_DEFAULT_OPUS_MODEL: profile.models?.opus || '',
+  };
+
+  // Filter out empty/whitespace string values (only set env vars that have values)
+  // This handles empty strings, null, undefined, and whitespace-only values
+  const filteredEnvVars: Record<string, string> = {};
+  for (const [key, value] of Object.entries(envVars)) {
+    const trimmedValue = value?.trim();
+    if (trimmedValue && trimmedValue !== '') {
+      filteredEnvVars[key] = trimmedValue;
+    }
+  }
+
+  return filteredEnvVars;
+}
+
+/**
+ * Test API profile connection
+ *
+ * Validates credentials by making a minimal API request to the /v1/models endpoint.
+ * Returns detailed error information for different failure types.
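+ *
+ * Illustrative caller-side usage (a sketch, not part of this module; showToast
+ * is a hypothetical UI helper):
+ *   const result = await testConnection('https://api.anthropic.com', apiKey);
+ *   if (!result.success) {
+ *     // result.errorType is 'auth' | 'endpoint' | 'network' | 'timeout' | 'unknown'
+ *     showToast(result.message);
+ *   }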
+ *
+ * @param baseUrl - API base URL (will be normalized)
+ * @param apiKey - API key for authentication
+ * @param signal - Optional AbortSignal for cancelling the request
+ * @returns Promise<TestConnectionResult> Result of connection test
+ */
+export async function testConnection(
+  baseUrl: string,
+  apiKey: string,
+  signal?: AbortSignal
+): Promise<TestConnectionResult> {
+  // Validate API key first (key format doesn't depend on URL normalization)
+  if (!validateApiKey(apiKey)) {
+    return {
+      success: false,
+      errorType: 'auth',
+      message: 'Authentication failed. Please check your API key.'
+    };
+  }
+
+  // Normalize baseUrl BEFORE validation (allows auto-prepending https://)
+  let normalizedUrl = baseUrl.trim();
+
+  // Store original URL for error suggestions
+  const originalUrl = normalizedUrl;
+
+  // If empty, return error
+  if (!normalizedUrl) {
+    return {
+      success: false,
+      errorType: 'endpoint',
+      message: 'Invalid endpoint. Please check the Base URL.'
+    };
+  }
+
+  // Ensure https:// prefix (auto-prepend if NO protocol exists)
+  // Check if URL already has a protocol (contains ://)
+  if (!normalizedUrl.includes('://')) {
+    normalizedUrl = `https://${normalizedUrl}`;
+  }
+
+  // Remove trailing slash
+  normalizedUrl = normalizedUrl.replace(/\/+$/, '');
+
+  // Helper function to generate URL suggestions
+  const getUrlSuggestions = (url: string): string[] => {
+    const suggestions: string[] = [];
+
+    // Check if URL lacks https://
+    if (!url.includes('://')) {
+      suggestions.push('Ensure URL starts with https://');
+    }
+
+    // Check for trailing slash
+    if (url.endsWith('/')) {
+      suggestions.push('Remove trailing slashes from URL');
+    }
+
+    // Check for suspicious domain patterns (common typos)
+    const domainMatch = url.match(/:\/\/([^/]+)/);
+    if (domainMatch) {
+      const domain = domainMatch[1];
+      // Check for common typos like anthropiic, ap, etc.
+      if (domain.includes('anthropiic') || domain.includes('anthhropic') ||
+          domain.includes('anhtropic') || domain.length < 10) {
+        suggestions.push('Check for typos in domain name');
+      }
+    }
+
+    return suggestions;
+  };
+
+  // Validate the normalized baseUrl
+  if (!validateBaseUrl(normalizedUrl)) {
+    // Generate suggestions based on original URL
+    const suggestions = getUrlSuggestions(originalUrl);
+    const message = suggestions.length > 0
+      ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}`
+      : 'Invalid endpoint. Please check the Base URL.';
+
+    return {
+      success: false,
+      errorType: 'endpoint',
+      message
+    };
+  }
+
+  // Set timeout to 10 seconds (NFR-P3 compliance)
+  const timeoutController = new AbortController();
+  const timeoutId = setTimeout(() => timeoutController.abort(), 10000);
+
+  // Create a combined controller that aborts when either timeout or external signal aborts
+  const combinedController = new AbortController();
+
+  // Cleanup function for event listeners
+  const cleanup = () => {
+    clearTimeout(timeoutId);
+  };
+
+  // Listen to timeout abort
+  const onTimeoutAbort = () => {
+    cleanup();
+    combinedController.abort();
+  };
+  timeoutController.signal.addEventListener('abort', onTimeoutAbort);
+
+  // Listen to external signal abort (if provided)
+  let onExternalAbort: (() => void) | undefined;
+  if (signal) {
+    // If external signal already aborted, abort immediately
+    if (signal.aborted) {
+      cleanup();
+      timeoutController.signal.removeEventListener('abort', onTimeoutAbort);
+      return {
+        success: false,
+        errorType: 'timeout',
+        message: 'Connection timeout. The endpoint did not respond.'
+ }; + } + + // Listen to external signal abort + onExternalAbort = () => { + cleanup(); + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + combinedController.abort(); + }; + signal.addEventListener('abort', onExternalAbort); + } + + const combinedSignal = combinedController.signal; + + try { + // Make minimal API request + const response = await fetch(`${normalizedUrl}/v1/models`, { + method: 'GET', + headers: { + 'x-api-key': apiKey, + 'anthropic-version': '2023-06-01' + }, + signal: combinedSignal + }); + + // Clear timeout on successful response + cleanup(); + if (onTimeoutAbort) { + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + } + if (signal && onExternalAbort) { + signal.removeEventListener('abort', onExternalAbort); + } + + // Parse response and determine error type + if (response.status === 200 || response.status === 201) { + return { + success: true, + message: 'Connection successful' + }; + } + + if (response.status === 401 || response.status === 403) { + return { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }; + } + + if (response.status === 404) { + // Generate URL suggestions for 404 errors + const suggestions = getUrlSuggestions(baseUrl.trim()); + const message = suggestions.length > 0 + ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}` + : 'Invalid endpoint. Please check the Base URL.'; + + return { + success: false, + errorType: 'endpoint', + message + }; + } + + // Other HTTP errors + return { + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }; + } catch (error) { + // Cleanup event listeners and timeout + cleanup(); + if (onTimeoutAbort) { + timeoutController.signal.removeEventListener('abort', onTimeoutAbort); + } + if (signal && onExternalAbort) { + signal.removeEventListener('abort', onExternalAbort); + } + + // Determine error type from error object + if (error instanceof Error) { + // AbortError โ†’ timeout + if (error.name === 'AbortError') { + return { + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }; + } + + // TypeError with ECONNREFUSED/ENOTFOUND โ†’ network error + if (error instanceof TypeError) { + const errorCode = (error as any).code; + if (errorCode === 'ECONNREFUSED' || errorCode === 'ENOTFOUND') { + return { + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }; + } + } + } + + // Other errors + return { + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }; + } +} diff --git a/apps/frontend/src/main/services/profile/index.ts b/apps/frontend/src/main/services/profile/index.ts new file mode 100644 index 0000000000..1980eb0300 --- /dev/null +++ b/apps/frontend/src/main/services/profile/index.ts @@ -0,0 +1,43 @@ +/** + * Profile Service - Barrel Export + * + * Re-exports all profile-related functionality for convenient importing. + * Main process code should import from this index file. 
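+ *
+ * Illustrative import (the relative path depends on the importing module's
+ * location and is an assumption here):
+ *   import { createProfile, getAPIProfileEnv } from './services/profile';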
+ */ + +// Profile Manager utilities +export { + loadProfilesFile, + saveProfilesFile, + generateProfileId, + validateFilePermissions, + getProfilesFilePath, + withProfilesLock, + atomicModifyProfiles +} from './profile-manager'; + +// Profile Service +export { + validateBaseUrl, + validateApiKey, + validateProfileNameUnique, + createProfile, + updateProfile, + deleteProfile, + getAPIProfileEnv, + testConnection, + discoverModels +} from './profile-service'; + +export type { CreateProfileInput, UpdateProfileInput } from './profile-service'; + +// Re-export types from shared for convenience +export type { + APIProfile, + ProfilesFile, + ProfileFormData, + TestConnectionResult, + ModelInfo, + DiscoverModelsResult, + DiscoverModelsError +} from '@shared/types/profile'; diff --git a/apps/frontend/src/main/services/profile/profile-manager.test.ts b/apps/frontend/src/main/services/profile/profile-manager.test.ts new file mode 100644 index 0000000000..e2e336588b --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-manager.test.ts @@ -0,0 +1,208 @@ +/** + * Tests for profile-manager.ts + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { + loadProfilesFile, + saveProfilesFile, + generateProfileId, + validateFilePermissions +} from './profile-manager'; +import type { ProfilesFile } from '@shared/types/profile'; + +// Use vi.hoisted to define mock functions that need to be accessible in vi.mock +const { fsMocks } = vi.hoisted(() => ({ + fsMocks: { + readFile: vi.fn(), + writeFile: vi.fn(), + mkdir: vi.fn(), + chmod: vi.fn(), + access: vi.fn(), + unlink: vi.fn(), + rename: vi.fn() + } +})); + +// Mock Electron app.getPath +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') { + return '/mock/userdata'; + } + return '/mock/path'; + }) + } +})); + +// Mock proper-lockfile +vi.mock('proper-lockfile', () => ({ + default: { + lock: vi.fn().mockResolvedValue(vi.fn().mockResolvedValue(undefined)) + } +})); + +// Mock fs module +vi.mock('fs', () => ({ + default: { + promises: fsMocks + }, + promises: fsMocks, + existsSync: vi.fn(), + constants: { + O_RDONLY: 0, + S_IRUSR: 0o400 + } +})); + +describe('profile-manager', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Setup default mocks to resolve + fsMocks.mkdir.mockResolvedValue(undefined); + fsMocks.writeFile.mockResolvedValue(undefined); + fsMocks.chmod.mockResolvedValue(undefined); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('loadProfilesFile', () => { + it('should return default profiles file when file does not exist', async () => { + fsMocks.readFile.mockRejectedValue(new Error('ENOENT')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should return default profiles file when file is corrupted JSON', async () => { + fsMocks.readFile.mockResolvedValue(Buffer.from('invalid json{')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should load valid profiles file', async () => { + const mockData: ProfilesFile = { + profiles: [ + { + id: 'test-id-1', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-test-key', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'test-id-1', + version: 1 + }; + + fsMocks.readFile.mockResolvedValue( + Buffer.from(JSON.stringify(mockData)) + 
); + + const result = await loadProfilesFile(); + + expect(result).toEqual(mockData); + }); + + it('should use auto-claude directory for profiles.json path', async () => { + fsMocks.readFile.mockRejectedValue(new Error('ENOENT')); + + await loadProfilesFile(); + + // Verify the file path includes auto-claude + const readFileCalls = fsMocks.readFile.mock.calls; + const filePath = readFileCalls[0]?.[0]; + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + }); + }); + + describe('saveProfilesFile', () => { + it('should write profiles file to disk', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + await saveProfilesFile(mockData); + + expect(fsMocks.writeFile).toHaveBeenCalled(); + const writeFileCall = fsMocks.writeFile.mock.calls[0]; + const filePath = writeFileCall?.[0]; + const content = writeFileCall?.[1]; + + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + expect(content).toBe(JSON.stringify(mockData, null, 2)); + }); + + it('should throw error when write fails', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + fsMocks.writeFile.mockRejectedValue(new Error('Write failed')); + + await expect(saveProfilesFile(mockData)).rejects.toThrow('Write failed'); + }); + }); + + describe('generateProfileId', () => { + it('should generate unique UUID v4 format IDs', () => { + const id1 = generateProfileId(); + const id2 = generateProfileId(); + + // UUID v4 format: xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx + expect(id1).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + expect(id2).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + + // IDs should be unique + expect(id1).not.toBe(id2); + }); + + it('should generate different IDs on consecutive calls', () => { + const ids = new Set(); + for (let i = 0; i < 100; i++) { + ids.add(generateProfileId()); + } + expect(ids.size).toBe(100); + }); + }); + + describe('validateFilePermissions', () => { + it('should validate user-readable only file permissions', async () => { + // Mock successful chmod + fsMocks.chmod.mockResolvedValue(undefined); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(true); + }); + + it('should return false if chmod fails', async () => { + fsMocks.chmod.mockRejectedValue(new Error('Permission denied')); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(false); + }); + }); +}); diff --git a/apps/frontend/src/main/services/profile/profile-manager.ts b/apps/frontend/src/main/services/profile/profile-manager.ts new file mode 100644 index 0000000000..83029f4b58 --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-manager.ts @@ -0,0 +1,262 @@ +/** + * Profile Manager - File I/O for API profiles + * + * Handles loading and saving profiles.json from the auto-claude directory. + * Provides graceful handling for missing or corrupted files. + * Uses file locking to prevent race conditions in concurrent operations. 
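+ *
+ * Sketch of the intended pattern (newId is a placeholder): read-modify-write
+ * sequences should go through atomicModifyProfiles rather than paired
+ * loadProfilesFile/saveProfilesFile calls, e.g.
+ *   await atomicModifyProfiles((file) => ({ ...file, activeProfileId: newId }));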
+ */
+
+import { promises as fs } from 'fs';
+import path from 'path';
+import { app } from 'electron';
+// @ts-expect-error - no types available for proper-lockfile
+import * as lockfile from 'proper-lockfile';
+import type { APIProfile, ProfilesFile } from '@shared/types/profile';
+
+/**
+ * Get the path to profiles.json in the auto-claude directory
+ */
+export function getProfilesFilePath(): string {
+  const userDataPath = app.getPath('userData');
+  return path.join(userDataPath, 'auto-claude', 'profiles.json');
+}
+
+/**
+ * Check if a value is a valid profile object with required fields
+ */
+function isValidProfile(value: unknown): value is APIProfile {
+  if (typeof value !== 'object' || value === null) {
+    return false;
+  }
+  const profile = value as Record<string, unknown>;
+  return (
+    typeof profile.id === 'string' &&
+    typeof profile.name === 'string' &&
+    typeof profile.baseUrl === 'string' &&
+    typeof profile.apiKey === 'string' &&
+    typeof profile.createdAt === 'number' &&
+    typeof profile.updatedAt === 'number'
+  );
+}
+
+/**
+ * Validate the structure of parsed profiles data
+ */
+function isValidProfilesFile(data: unknown): data is ProfilesFile {
+  if (typeof data !== 'object' || data === null) {
+    return false;
+  }
+  const obj = data as Record<string, unknown>;
+
+  // Check profiles is an array
+  if (!Array.isArray(obj.profiles)) {
+    return false;
+  }
+
+  // Check each profile has required fields
+  for (const profile of obj.profiles) {
+    if (!isValidProfile(profile)) {
+      return false;
+    }
+  }
+
+  // Check activeProfileId is string or null
+  if (obj.activeProfileId !== null && typeof obj.activeProfileId !== 'string') {
+    return false;
+  }
+
+  // Check version is a number
+  if (typeof obj.version !== 'number') {
+    return false;
+  }
+
+  return true;
+}
+
+/**
+ * Default profiles file structure for fallback
+ */
+function getDefaultProfilesFile(): ProfilesFile {
+  return {
+    profiles: [],
+    activeProfileId: null,
+    version: 1
+  };
+}
+
+/**
+ * Load profiles.json from disk
+ * Returns default empty profiles file if file doesn't exist or is corrupted
+ */
+export async function loadProfilesFile(): Promise<ProfilesFile> {
+  const filePath = getProfilesFilePath();
+
+  try {
+    const content = await fs.readFile(filePath, 'utf-8');
+    const data = JSON.parse(content);
+
+    // Validate parsed data structure
+    if (isValidProfilesFile(data)) {
+      return data;
+    }
+
+    // Validation failed - return default
+    return getDefaultProfilesFile();
+  } catch {
+    // File doesn't exist or read/parse error - return default
+    return getDefaultProfilesFile();
+  }
+}
+
+/**
+ * Save profiles.json to disk
+ * Creates the auto-claude directory if it doesn't exist
+ * Ensures secure file permissions (user read/write only)
+ */
+export async function saveProfilesFile(data: ProfilesFile): Promise<void> {
+  const filePath = getProfilesFilePath();
+  const dir = path.dirname(filePath);
+
+  // Ensure directory exists
+  // mkdir with recursive: true resolves successfully if dir already exists
+  await fs.mkdir(dir, { recursive: true });
+
+  // Write file with formatted JSON
+  const content = JSON.stringify(data, null, 2);
+  await fs.writeFile(filePath, content, 'utf-8');
+
+  // Set secure file permissions (user read/write only - 0600)
+  const permissionsValid = await validateFilePermissions(filePath);
+  if (!permissionsValid) {
+    throw new Error('Failed to set secure file permissions on profiles file');
+  }
+}
+
+/**
+ * Generate a unique UUID v4 for a new profile
+ */
+export function generateProfileId(): string {
+  // Use crypto.randomUUID() if available (Node.js 16+ and modern browsers)
+  // Fall back to hand-rolled implementation for older environments
+  if (typeof crypto !== 'undefined' && typeof crypto.randomUUID === 'function') {
+    return crypto.randomUUID();
+  }
+
+  // Fallback: hand-rolled UUID v4 implementation
+  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
+    const r = (Math.random() * 16) | 0;
+    const v = c === 'x' ? r : (r & 0x3) | 0x8;
+    return v.toString(16);
+  });
+}
+
+/**
+ * Validate and set file permissions to user read/write only (0600)
+ * Returns true if successful, false otherwise
+ */
+export async function validateFilePermissions(filePath: string): Promise<boolean> {
+  try {
+    // Set file permissions to user read/write only (0600)
+    await fs.chmod(filePath, 0o600);
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+/**
+ * Execute a function with exclusive file lock to prevent race conditions
+ * This ensures atomic read-modify-write operations on the profiles file
+ *
+ * @param fn Function to execute while holding the lock
+ * @returns Result of the function execution
+ */
+export async function withProfilesLock<T>(fn: () => Promise<T>): Promise<T> {
+  const filePath = getProfilesFilePath();
+  const dir = path.dirname(filePath);
+
+  // Ensure directory and file exist before trying to lock
+  await fs.mkdir(dir, { recursive: true });
+
+  // Create file if it doesn't exist (needed for lockfile to work)
+  try {
+    await fs.access(filePath);
+  } catch {
+    // File doesn't exist, create it atomically with exclusive flag
+    const defaultData = getDefaultProfilesFile();
+    try {
+      await fs.writeFile(filePath, JSON.stringify(defaultData, null, 2), { encoding: 'utf-8', flag: 'wx' });
+    } catch (err: unknown) {
+      // If file was created by another process (race condition), that's fine
+      if ((err as NodeJS.ErrnoException).code !== 'EEXIST') {
+        throw err;
+      }
+      // EEXIST means another process won the race, proceed normally
+    }
+  }
+
+  // Acquire lock with reasonable timeout
+  let release: (() => Promise<void>) | undefined;
+  try {
+    release = await lockfile.lock(filePath, {
+      retries: {
+        retries: 10,
+        minTimeout: 50,
+        maxTimeout: 500
+      }
+    });
+
+    // Execute the function while holding the lock
+    return await fn();
+  } finally {
+    // Always release the lock
+    if (release) {
+      await release();
+    }
+  }
+}
+
+/**
+ * Atomically modify the profiles file
+ * Loads, modifies, and saves the file within an exclusive lock
+ *
+ * @param modifier Function that modifies the ProfilesFile
+ * @returns The modified ProfilesFile
+ */
+export async function atomicModifyProfiles(
+  modifier: (file: ProfilesFile) => ProfilesFile | Promise<ProfilesFile>
+): Promise<ProfilesFile> {
+  return await withProfilesLock(async () => {
+    // Load current state
+    const file = await loadProfilesFile();
+
+    // Apply modification
+    const modifiedFile = await modifier(file);
+
+    // Save atomically (write to temp file and rename)
+    const filePath = getProfilesFilePath();
+    const tempPath = `${filePath}.tmp`;
+
+    try {
+      // Write to temp file
+      const content = JSON.stringify(modifiedFile, null, 2);
+      await fs.writeFile(tempPath, content, 'utf-8');
+
+      // Set permissions on temp file
+      await fs.chmod(tempPath, 0o600);
+
+      // Atomically replace original file
+      await fs.rename(tempPath, filePath);
+
+      return modifiedFile;
+    } catch (error) {
+      // Clean up temp file on error
+      try {
+        await fs.unlink(tempPath);
+      } catch {
+        // Ignore cleanup errors
+      }
+      throw error;
+    }
+  });
+}
diff --git a/apps/frontend/src/main/services/profile/profile-service.test.ts
b/apps/frontend/src/main/services/profile/profile-service.test.ts new file mode 100644 index 0000000000..dfd8a07955 --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-service.test.ts @@ -0,0 +1,792 @@ +/** + * Tests for profile-service.ts + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { + validateBaseUrl, + validateApiKey, + validateProfileNameUnique, + createProfile, + updateProfile, + getAPIProfileEnv, + testConnection, + discoverModels +} from './profile-service'; +import type { APIProfile, ProfilesFile, TestConnectionResult } from '@shared/types/profile'; + +// Mock Anthropic SDK - use vi.hoisted to properly hoist the mock variable +const { mockModelsList, mockMessagesCreate } = vi.hoisted(() => ({ + mockModelsList: vi.fn(), + mockMessagesCreate: vi.fn() +})); + +vi.mock('@anthropic-ai/sdk', () => { + // Create mock error classes + class APIError extends Error { + status: number; + constructor(message: string, status: number) { + super(message); + this.name = 'APIError'; + this.status = status; + } + } + class AuthenticationError extends APIError { + constructor(message: string) { + super(message, 401); + this.name = 'AuthenticationError'; + } + } + class NotFoundError extends APIError { + constructor(message: string) { + super(message, 404); + this.name = 'NotFoundError'; + } + } + class APIConnectionError extends Error { + constructor(message: string) { + super(message); + this.name = 'APIConnectionError'; + } + } + class APIConnectionTimeoutError extends Error { + constructor(message: string) { + super(message); + this.name = 'APIConnectionTimeoutError'; + } + } + class BadRequestError extends APIError { + constructor(message: string) { + super(message, 400); + this.name = 'BadRequestError'; + } + } + + return { + default: class Anthropic { + models = { + list: mockModelsList + }; + messages = { + create: mockMessagesCreate + }; + }, + APIError, + AuthenticationError, + NotFoundError, + APIConnectionError, + APIConnectionTimeoutError, + BadRequestError + }; +}); + +// Mock profile-manager +vi.mock('./profile-manager', () => ({ + loadProfilesFile: vi.fn(), + saveProfilesFile: vi.fn(), + generateProfileId: vi.fn(() => 'mock-uuid-1234'), + validateFilePermissions: vi.fn().mockResolvedValue(true), + getProfilesFilePath: vi.fn(() => '/mock/profiles.json'), + atomicModifyProfiles: vi.fn(async (modifier: (file: ProfilesFile) => ProfilesFile) => { + // Get the current mock file from loadProfilesFile + const { loadProfilesFile, saveProfilesFile } = await import('./profile-manager'); + const file = await loadProfilesFile(); + const modified = modifier(file); + await saveProfilesFile(modified); + return modified; + }) +})); + +describe('profile-service', () => { + describe('validateBaseUrl', () => { + it('should accept valid HTTPS URLs', () => { + expect(validateBaseUrl('https://api.anthropic.com')).toBe(true); + expect(validateBaseUrl('https://custom-api.example.com')).toBe(true); + expect(validateBaseUrl('https://api.example.com/v1')).toBe(true); + }); + + it('should accept valid HTTP URLs', () => { + expect(validateBaseUrl('http://localhost:8080')).toBe(true); + expect(validateBaseUrl('http://127.0.0.1:8000')).toBe(true); + }); + + it('should reject invalid URLs', () => { + expect(validateBaseUrl('not-a-url')).toBe(false); + expect(validateBaseUrl('ftp://example.com')).toBe(false); + expect(validateBaseUrl('')).toBe(false); + expect(validateBaseUrl('https://')).toBe(false); + }); + + it('should reject URLs without valid format', () => { + 
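+      // Neither value has a scheme, so the URL constructor inside validateBaseUrl throws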
expect(validateBaseUrl('anthropic.com')).toBe(false); + expect(validateBaseUrl('://api.anthropic.com')).toBe(false); + }); + }); + + describe('validateApiKey', () => { + it('should accept Anthropic API key format (sk-ant-...)', () => { + expect(validateApiKey('sk-ant-api03-12345')).toBe(true); + expect(validateApiKey('sk-ant-test-key')).toBe(true); + }); + + it('should accept OpenAI API key format (sk-...)', () => { + expect(validateApiKey('sk-proj-12345')).toBe(true); + expect(validateApiKey('sk-test-key-12345')).toBe(true); + }); + + it('should accept custom API keys with reasonable length', () => { + expect(validateApiKey('custom-key-12345678')).toBe(true); + expect(validateApiKey('x-api-key-abcdefghij')).toBe(true); + }); + + it('should reject empty or too short keys', () => { + expect(validateApiKey('')).toBe(false); + expect(validateApiKey('sk-')).toBe(false); + expect(validateApiKey('abc')).toBe(false); + }); + + it('should reject keys with only whitespace', () => { + expect(validateApiKey(' ')).toBe(false); + expect(validateApiKey('\t\n')).toBe(false); + }); + }); + + describe('validateProfileNameUnique', () => { + it('should return true when name is unique', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('New Profile'); + expect(result).toBe(true); + }); + + it('should return false when name already exists', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique('Existing Profile'); + expect(result).toBe(false); + }); + + it('should be case-insensitive for duplicate detection', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result1 = await validateProfileNameUnique('my profile'); + const result2 = await validateProfileNameUnique('MY PROFILE'); + expect(result1).toBe(false); + expect(result2).toBe(false); + }); + + it('should trim whitespace before checking', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await validateProfileNameUnique(' My Profile '); + expect(result).toBe(false); + }); + }); + + describe('createProfile', () => { + it('should create profile with valid data and save', async () => { + const 
mockFile: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile, generateProfileId } = + await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + vi.mocked(generateProfileId).mockReturnValue('generated-id-123'); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }; + + const result = await createProfile(input); + + expect(result).toMatchObject({ + id: 'generated-id-123', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key', + models: { + default: 'claude-3-5-sonnet-20241022' + } + }); + expect(result.createdAt).toBeGreaterThan(0); + expect(result.updatedAt).toBeGreaterThan(0); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error for invalid base URL', async () => { + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue({ + profiles: [], + activeProfileId: null, + version: 1 + }); + + const input = { + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'too-short' + }; + + await expect(createProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error for duplicate profile name', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: '1', + name: 'Existing Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + name: 'Existing Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-test-key' + }; + + await expect(createProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + }); + + describe('updateProfile', () => { + it('should update profile name and other fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Old Name', + baseUrl: 'https://old-api.example.com', + apiKey: 'sk-old-key-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'New Name', + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-123', + models: { default: 'claude-3-5-sonnet-20241022' } + }; + + const result = await updateProfile(input); + + expect(result.name).toBe('New Name'); + expect(result.baseUrl).toBe('https://new-api.example.com'); + expect(result.apiKey).toBe('sk-new-api-key-123'); + expect(result.models).toEqual({ default: 'claude-3-5-sonnet-20241022' }); + 
expect(result.updatedAt).toBeGreaterThan(1000000); + expect(result.createdAt).toBe(1000000); + }); + + it('should allow updating profile with same name (case-insensitive)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'My Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-old-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile, saveProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + vi.mocked(saveProfilesFile).mockResolvedValue(undefined); + + const input = { + id: 'existing-id', + name: 'my profile', + baseUrl: 'https://new-api.example.com', + apiKey: 'sk-new-api-key-456' + }; + + const result = await updateProfile(input); + expect(result.name).toBe('my profile'); + expect(saveProfilesFile).toHaveBeenCalled(); + }); + + it('should throw error when name conflicts with another profile', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Profile One', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678', + createdAt: 1000000, + updatedAt: 1000000 + }, + { + id: 'profile-2', + name: 'Profile Two', + baseUrl: 'https://api2.example.com', + apiKey: 'sk-key-two-12345678', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'profile-1', + name: 'Profile Two', + baseUrl: 'https://api1.example.com', + apiKey: 'sk-key-one-12345678' + }; + + await expect(updateProfile(input)).rejects.toThrow( + 'A profile with this name already exists' + ); + }); + + it('should throw error for invalid base URL', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'not-a-url', + apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid base URL'); + }); + + it('should throw error for invalid API key', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-api-key-123', + createdAt: 1000000, + updatedAt: 1000000 + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'existing-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'too-short' + }; + + await expect(updateProfile(input)).rejects.toThrow('Invalid API key'); + }); + + it('should throw error when profile not found', async () => { + const mockFile: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const input = { + id: 'non-existent-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + 
apiKey: 'sk-test-api-key-123' + }; + + await expect(updateProfile(input)).rejects.toThrow('Profile not found'); + }); + }); + + describe('getAPIProfileEnv', () => { + it('should return empty object when no active profile (OAuth mode)', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-test-key-12345678', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: null, + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + expect(result).toEqual({}); + }); + + it('should return correct env vars for active profile with all fields', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: 'https://api.custom.com', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: 'claude-3-5-haiku-20241022', + sonnet: 'claude-3-5-sonnet-20241022', + opus: 'claude-3-5-opus-20241022' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).toEqual({ + ANTHROPIC_BASE_URL: 'https://api.custom.com', + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022', + ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022', + ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022' + }); + }); + + it('should filter out empty string values', async () => { + const mockFile: ProfilesFile = { + profiles: [ + { + id: 'profile-1', + name: 'Test Profile', + baseUrl: '', + apiKey: 'sk-test-key-12345678', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: '', + sonnet: '' + }, + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'profile-1', + version: 1 + }; + + const { loadProfilesFile } = await import('./profile-manager'); + vi.mocked(loadProfilesFile).mockResolvedValue(mockFile); + + const result = await getAPIProfileEnv(); + + expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL'); + expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL'); + expect(result).toEqual({ + ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678', + ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022' + }); + }); + }); + + describe('testConnection', () => { + beforeEach(() => { + mockModelsList.mockReset(); + mockMessagesCreate.mockReset(); + }); + + // Helper to create mock errors with proper name property + const createMockError = (name: string, message: string) => { + const error = new Error(message); + error.name = name; + return error; + }; + + it('should return success for valid credentials (200 response)', async () => { + mockModelsList.mockResolvedValue({ data: [] }); + + const result = await testConnection('https://api.anthropic.com', 'sk-ant-test-key-12'); + + expect(result).toEqual({ + success: true, + message: 'Connection successful' + }); + }); + + it('should return auth error for invalid API key (401 response)', async () => { + mockModelsList.mockRejectedValue(createMockError('AuthenticationError', 'Unauthorized')); + + 
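// createMockError only sets error.name; the service classifies errors by name as well as instanceof, so the mocked AuthenticationError still maps to the 'auth' error type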
const result = await testConnection('https://api.anthropic.com', 'sk-invalid-key-12'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }); + }); + + it('should return network error for connection refused', async () => { + mockModelsList.mockRejectedValue(createMockError('APIConnectionError', 'ECONNREFUSED')); + + const result = await testConnection('https://unreachable.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }); + }); + + it('should return timeout error for AbortError', async () => { + mockModelsList.mockRejectedValue(createMockError('APIConnectionTimeoutError', 'Timeout')); + + const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }); + }); + + it('should auto-prepend https:// if missing', async () => { + mockModelsList.mockResolvedValue({ data: [] }); + + const result = await testConnection('api.anthropic.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: true, + message: 'Connection successful' + }); + }); + + it('should return error for empty baseUrl', async () => { + const result = await testConnection('', 'sk-test-key-12chars'); + + expect(result).toEqual({ + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' + }); + expect(mockModelsList).not.toHaveBeenCalled(); + }); + + it('should return error for invalid API key format', async () => { + const result = await testConnection('https://api.anthropic.com', 'short'); + + expect(result).toEqual({ + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' 
+ }); + expect(mockModelsList).not.toHaveBeenCalled(); + }); + }); + + describe('discoverModels', () => { + beforeEach(() => { + mockModelsList.mockReset(); + }); + + // Helper to create mock errors with proper name property + const createMockError = (name: string, message: string) => { + const error = new Error(message); + error.name = name; + return error; + }; + + it('should return list of models for successful response', async () => { + mockModelsList.mockResolvedValue({ + data: [ + { id: 'claude-3-5-sonnet-20241022', display_name: 'Claude Sonnet 3.5', created_at: '2024-10-22', type: 'model' }, + { id: 'claude-3-5-haiku-20241022', display_name: 'Claude Haiku 3.5', created_at: '2024-10-22', type: 'model' } + ] + }); + + const result = await discoverModels('https://api.anthropic.com', 'sk-ant-test-key-12'); + + expect(result).toEqual({ + models: [ + { id: 'claude-3-5-sonnet-20241022', display_name: 'Claude Sonnet 3.5' }, + { id: 'claude-3-5-haiku-20241022', display_name: 'Claude Haiku 3.5' } + ] + }); + }); + + it('should throw auth error for 401 response', async () => { + mockModelsList.mockRejectedValue(createMockError('AuthenticationError', 'Unauthorized')); + + const error = await discoverModels('https://api.anthropic.com', 'sk-invalid-key') + .catch(e => e); + + expect(error).toBeInstanceOf(Error); + expect((error as Error & { errorType?: string }).errorType).toBe('auth'); + }); + + it('should throw not_supported error for 404 response', async () => { + mockModelsList.mockRejectedValue(createMockError('NotFoundError', 'Not Found')); + + const error = await discoverModels('https://custom-api.com', 'sk-test-key-12345678') + .catch(e => e); + + expect(error).toBeInstanceOf(Error); + expect((error as Error & { errorType?: string }).errorType).toBe('not_supported'); + }); + + it('should auto-prepend https:// if missing', async () => { + mockModelsList.mockResolvedValue({ data: [] }); + + const result = await discoverModels('api.anthropic.com', 'sk-test-key-12chars'); + + expect(result).toEqual({ models: [] }); + }); + }); +}); diff --git a/apps/frontend/src/main/services/profile/profile-service.ts b/apps/frontend/src/main/services/profile/profile-service.ts new file mode 100644 index 0000000000..f3902049c8 --- /dev/null +++ b/apps/frontend/src/main/services/profile/profile-service.ts @@ -0,0 +1,613 @@ +/** + * Profile Service - Validation and profile creation + * + * Provides validation functions for URL, API key, and profile name uniqueness. + * Handles creating new profiles with validation. + * Uses atomic operations with file locking to prevent TOCTOU race conditions. 
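+ * (TOCTOU: time-of-check to time-of-use. The name-uniqueness check and the write happen inside one locked operation, so a concurrent writer cannot invalidate the check between them.)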
+ */ + +import Anthropic, { + AuthenticationError, + NotFoundError, + APIConnectionError, + APIConnectionTimeoutError +} from '@anthropic-ai/sdk'; + +import { loadProfilesFile, generateProfileId, atomicModifyProfiles } from './profile-manager'; +import type { APIProfile, TestConnectionResult, ModelInfo, DiscoverModelsResult } from '@shared/types/profile'; + +/** + * Input type for creating a profile (without id, createdAt, updatedAt) + */ +export type CreateProfileInput = Omit<APIProfile, 'id' | 'createdAt' | 'updatedAt'>; + +/** + * Input type for updating a profile (with id, without createdAt, updatedAt) + */ +export type UpdateProfileInput = Pick<APIProfile, 'id'> & CreateProfileInput; + +/** + * Validate base URL format + * Accepts HTTP(S) URLs with valid endpoints + */ +export function validateBaseUrl(baseUrl: string): boolean { + if (!baseUrl || baseUrl.trim() === '') { + return false; + } + + try { + const url = new URL(baseUrl); + // Only allow http and https protocols + return url.protocol === 'http:' || url.protocol === 'https:'; + } catch { + return false; + } +} + +/** + * Validate API key format + * Accepts various API key formats (Anthropic, OpenAI, custom) + */ +export function validateApiKey(apiKey: string): boolean { + if (!apiKey || apiKey.trim() === '') { + return false; + } + + const trimmed = apiKey.trim(); + + // Too short to be a real API key + if (trimmed.length < 12) { + return false; + } + + // Accept common API key formats + // Anthropic: sk-ant-... + // OpenAI: sk-proj-... or sk-... + // Custom: any reasonable length key with alphanumeric chars + const hasValidChars = /^[a-zA-Z0-9\-_+.]+$/.test(trimmed); + + return hasValidChars; +} + +/** + * Validate that profile name is unique (case-insensitive, trimmed) + * + * WARNING: This is for UX feedback only. Do NOT rely on this for correctness. + * The actual uniqueness check happens atomically inside create/update operations + * to prevent TOCTOU race conditions. + */ +export async function validateProfileNameUnique(name: string): Promise<boolean> { + const trimmed = name.trim().toLowerCase(); + + const file = await loadProfilesFile(); + + // Check if any profile has the same name (case-insensitive) + const exists = file.profiles.some( + (p) => p.name.trim().toLowerCase() === trimmed + ); + + return !exists; +} + +/** + * Delete a profile with validation + * Throws errors for validation failures + * Uses atomic operation to prevent race conditions + */ +export async function deleteProfile(id: string): Promise<void> { + await atomicModifyProfiles((file) => { + // Find the profile + const profileIndex = file.profiles.findIndex((p) => p.id === id); + if (profileIndex === -1) { + throw new Error('Profile not found'); + } + + // Active Profile Check: Cannot delete active profile (AC3) + if (file.activeProfileId === id) { + throw new Error('Cannot delete active profile. 
Please switch to another profile or OAuth first.'); + } + + // Remove profile + file.profiles.splice(profileIndex, 1); + + // Last Profile Fallback: If no profiles remain, set activeProfileId to null (AC4) + if (file.profiles.length === 0) { + file.activeProfileId = null; + } + + return file; + }); +} + +/** + * Create a new profile with validation + * Throws errors for validation failures + * Uses atomic operation to prevent race conditions in concurrent profile creation + */ +export async function createProfile(input: CreateProfileInput): Promise<APIProfile> { + // Validate base URL + if (!validateBaseUrl(input.baseUrl)) { + throw new Error('Invalid base URL'); + } + + // Validate API key + if (!validateApiKey(input.apiKey)) { + throw new Error('Invalid API key'); + } + + // Use atomic operation to ensure uniqueness check and creation happen together + // This prevents TOCTOU race where another process creates the same profile name + // between our check and write + const newProfile = await atomicModifyProfiles((file) => { + // Re-check uniqueness within the lock (this is the authoritative check) + const trimmed = input.name.trim().toLowerCase(); + const exists = file.profiles.some( + (p) => p.name.trim().toLowerCase() === trimmed + ); + + if (exists) { + throw new Error('A profile with this name already exists'); + } + + // Create new profile + const now = Date.now(); + const profile: APIProfile = { + id: generateProfileId(), + name: input.name.trim(), + baseUrl: input.baseUrl.trim(), + apiKey: input.apiKey.trim(), + models: input.models, + createdAt: now, + updatedAt: now + }; + + // Add to profiles list + file.profiles.push(profile); + + // Set as active if it's the first profile + if (file.profiles.length === 1) { + file.activeProfileId = profile.id; + } + + return file; + }); + + // Find and return the newly created profile + const createdProfile = newProfile.profiles[newProfile.profiles.length - 1]; + return createdProfile; +} + +/** + * Update an existing profile with validation + * Throws errors for validation failures + * Uses atomic operation to prevent race conditions in concurrent profile updates + */ +export async function updateProfile(input: UpdateProfileInput): Promise<APIProfile> { + // Validate base URL + if (!validateBaseUrl(input.baseUrl)) { + throw new Error('Invalid base URL'); + } + + // Validate API key + if (!validateApiKey(input.apiKey)) { + throw new Error('Invalid API key'); + } + + // Use atomic operation to ensure uniqueness check and update happen together + const modifiedFile = await atomicModifyProfiles((file) => { + // Find the profile + const profileIndex = file.profiles.findIndex((p) => p.id === input.id); + if (profileIndex === -1) { + throw new Error('Profile not found'); + } + + const existingProfile = file.profiles[profileIndex]; + + // Validate profile name uniqueness (exclude current profile from check) + // This check happens atomically within the lock + if (input.name.trim().toLowerCase() !== existingProfile.name.trim().toLowerCase()) { + const trimmed = input.name.trim().toLowerCase(); + const nameExists = file.profiles.some( + (p) => p.id !== input.id && p.name.trim().toLowerCase() === trimmed + ); + if (nameExists) { + throw new Error('A profile with this name already exists'); + } + } + + // Update profile (including name) + const updated: APIProfile = { + ...existingProfile, + name: input.name.trim(), + baseUrl: input.baseUrl.trim(), + apiKey: input.apiKey.trim(), + models: input.models, + updatedAt: Date.now() + }; + + // Replace in profiles list + 
file.profiles[profileIndex] = updated; + + return file; + }); + + // Find and return the updated profile + const updatedProfile = modifiedFile.profiles.find((p) => p.id === input.id)!; + return updatedProfile; +} + +/** + * Get environment variables for the active API profile + * + * Maps the active API profile to SDK environment variables for injection + * into Python subprocess. Returns empty object when no profile is active + * (OAuth mode), allowing CLAUDE_CODE_OAUTH_TOKEN to be used instead. + * + * Environment Variable Mapping: + * - profile.baseUrl → ANTHROPIC_BASE_URL + * - profile.apiKey → ANTHROPIC_AUTH_TOKEN + * - profile.models.default → ANTHROPIC_MODEL + * - profile.models.haiku → ANTHROPIC_DEFAULT_HAIKU_MODEL + * - profile.models.sonnet → ANTHROPIC_DEFAULT_SONNET_MODEL + * - profile.models.opus → ANTHROPIC_DEFAULT_OPUS_MODEL + * + * Empty string values are filtered out (not set as env vars). + * + * @returns Promise<Record<string, string>> Environment variables for active profile + */ +export async function getAPIProfileEnv(): Promise<Record<string, string>> { + // Load profiles.json + const file = await loadProfilesFile(); + + // If no active profile (null/empty), return empty object (OAuth mode) + if (!file.activeProfileId || file.activeProfileId === '') { + return {}; + } + + // Find active profile by activeProfileId + const profile = file.profiles.find((p) => p.id === file.activeProfileId); + + // If profile not found, return empty object (shouldn't happen with valid data) + if (!profile) { + return {}; + } + + // Map profile fields to SDK env vars + const envVars: Record<string, string> = { + ANTHROPIC_BASE_URL: profile.baseUrl || '', + ANTHROPIC_AUTH_TOKEN: profile.apiKey || '', + ANTHROPIC_MODEL: profile.models?.default || '', + ANTHROPIC_DEFAULT_HAIKU_MODEL: profile.models?.haiku || '', + ANTHROPIC_DEFAULT_SONNET_MODEL: profile.models?.sonnet || '', + ANTHROPIC_DEFAULT_OPUS_MODEL: profile.models?.opus || '', + }; + + // Filter out empty/whitespace string values (only set env vars that have values) + // This handles empty strings, null, undefined, and whitespace-only values + const filteredEnvVars: Record<string, string> = {}; + for (const [key, value] of Object.entries(envVars)) { + const trimmedValue = value?.trim(); + if (trimmedValue && trimmedValue !== '') { + filteredEnvVars[key] = trimmedValue; + } + } + + return filteredEnvVars; +} + +/** + * Test API profile connection + * + * Validates credentials by making a minimal API request to the /v1/models endpoint. + * Uses the Anthropic SDK for built-in timeout, retry, and error handling. + * + * @param baseUrl - API base URL (will be normalized) + * @param apiKey - API key for authentication + * @param signal - Optional AbortSignal for cancelling the request + * @returns Promise<TestConnectionResult> Result of connection test + */ +export async function testConnection( + baseUrl: string, + apiKey: string, + signal?: AbortSignal +): Promise<TestConnectionResult> { + // Validate API key first (key format doesn't depend on URL normalization) + if (!validateApiKey(apiKey)) { + return { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }; + } + + // Normalize baseUrl BEFORE validation (allows auto-prepending https://) + let normalizedUrl = baseUrl.trim(); + + // Store original URL for error suggestions + const originalUrl = normalizedUrl; + + // If empty, return error + if (!normalizedUrl) { + return { + success: false, + errorType: 'endpoint', + message: 'Invalid endpoint. Please check the Base URL.' 
+ }; + } + + // Ensure https:// prefix (auto-prepend if NO protocol exists) + if (!normalizedUrl.includes('://')) { + normalizedUrl = `https://${normalizedUrl}`; + } + + // Remove trailing slash + normalizedUrl = normalizedUrl.replace(/\/+$/, ''); + + // Helper function to generate URL suggestions + const getUrlSuggestions = (url: string): string[] => { + const suggestions: string[] = []; + + if (!url.includes('://')) { + suggestions.push('Ensure URL starts with https://'); + } + + if (url.endsWith('/')) { + suggestions.push('Remove trailing slashes from URL'); + } + + const domainMatch = url.match(/:\/\/([^/]+)/); + if (domainMatch) { + const domain = domainMatch[1]; + if (domain.includes('anthropiic') || domain.includes('anthhropic') || + domain.includes('anhtropic') || domain.length < 10) { + suggestions.push('Check for typos in domain name'); + } + } + + return suggestions; + }; + + // Validate the normalized baseUrl + if (!validateBaseUrl(normalizedUrl)) { + const suggestions = getUrlSuggestions(originalUrl); + const message = suggestions.length > 0 + ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}` + : 'Invalid endpoint. Please check the Base URL.'; + + return { + success: false, + errorType: 'endpoint', + message + }; + } + + // Check if signal already aborted + if (signal?.aborted) { + return { + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }; + } + + try { + // Create Anthropic client with SDK + const client = new Anthropic({ + apiKey, + baseURL: normalizedUrl, + timeout: 10000, // 10 seconds + maxRetries: 0, // Disable retries for immediate feedback + }); + + // Make minimal request to test connection (pass signal for cancellation) + // Try models.list first, but some Anthropic-compatible APIs don't support it + try { + await client.models.list({ limit: 1 }, { signal: signal ?? undefined }); + } catch (modelsError) { + // If models endpoint returns 404, try messages endpoint instead + // Many Anthropic-compatible APIs (e.g., MiniMax) only support /v1/messages + const modelsErrorName = modelsError instanceof Error ? modelsError.name : ''; + if (modelsErrorName === 'NotFoundError' || modelsError instanceof NotFoundError) { + // Fall back to messages endpoint with minimal request + // This will fail with 400 (invalid request) but proves the endpoint is reachable + try { + await client.messages.create({ + model: 'test', + max_tokens: 1, + messages: [{ role: 'user', content: 'test' }] + }, { signal: signal ?? undefined }); + } catch (messagesError) { + const messagesErrorName = messagesError instanceof Error ? 
messagesError.name : ''; + // 400/422 errors mean the endpoint is valid, just our test request was invalid + // This is expected - we're just testing connectivity + if (messagesErrorName === 'BadRequestError' || + messagesErrorName === 'InvalidRequestError' || + (messagesError instanceof Error && 'status' in messagesError && + ((messagesError as { status?: number }).status === 400 || + (messagesError as { status?: number }).status === 422))) { + // Endpoint is valid, connection successful + return { + success: true, + message: 'Connection successful' + }; + } + // Re-throw other errors to be handled by outer catch + throw messagesError; + } + // If messages.create somehow succeeded, connection is valid + return { + success: true, + message: 'Connection successful' + }; + } + // Re-throw non-404 errors to be handled by outer catch + throw modelsError; + } + + return { + success: true, + message: 'Connection successful' + }; + } catch (error) { + // Map SDK errors to TestConnectionResult error types + // Use error.name for instanceof-like checks (works with mocks that set this.name) + const errorName = error instanceof Error ? error.name : ''; + + if (errorName === 'AuthenticationError' || error instanceof AuthenticationError) { + return { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + }; + } + + if (errorName === 'NotFoundError' || error instanceof NotFoundError) { + const suggestions = getUrlSuggestions(baseUrl.trim()); + const message = suggestions.length > 0 + ? `Invalid endpoint. Please check the Base URL.${suggestions.map(s => ' ' + s).join('')}` + : 'Invalid endpoint. Please check the Base URL.'; + + return { + success: false, + errorType: 'endpoint', + message + }; + } + + if (errorName === 'APIConnectionTimeoutError' || error instanceof APIConnectionTimeoutError) { + return { + success: false, + errorType: 'timeout', + message: 'Connection timeout. The endpoint did not respond.' + }; + } + + if (errorName === 'APIConnectionError' || error instanceof APIConnectionError) { + return { + success: false, + errorType: 'network', + message: 'Network error. Please check your internet connection.' + }; + } + + // APIError or other errors + return { + success: false, + errorType: 'unknown', + message: 'Connection test failed. Please try again.' + }; + } +} + +/** + * Discover available models from API endpoint + * + * Fetches the list of available models from the Anthropic-compatible /v1/models endpoint. + * Uses the Anthropic SDK for built-in timeout, retry, and error handling. + * + * @param baseUrl - API base URL (will be normalized) + * @param apiKey - API key for authentication + * @param signal - Optional AbortSignal for cancelling the request (checked before request) + * @returns Promise<DiscoverModelsResult> List of available models + * @throws Error with errorType for auth/network/endpoint/timeout/not_supported failures + */ +export async function discoverModels( + baseUrl: string, + apiKey: string, + signal?: AbortSignal +): Promise<DiscoverModelsResult> { + // Validate API key first + if (!validateApiKey(apiKey)) { + const error: Error & { errorType?: string } = new Error('Authentication failed. Please check your API key.'); + error.errorType = 'auth'; + throw error; + } + + // Normalize baseUrl BEFORE validation + let normalizedUrl = baseUrl.trim(); + + // If empty, throw error + if (!normalizedUrl) { + const error: Error & { errorType?: string } = new Error('Invalid endpoint. 
Please check the Base URL.'); + error.errorType = 'endpoint'; + throw error; + } + + // Ensure https:// prefix (auto-prepend if NO protocol exists) + if (!normalizedUrl.includes('://')) { + normalizedUrl = `https://${normalizedUrl}`; + } + + // Remove trailing slash + normalizedUrl = normalizedUrl.replace(/\/+$/, ''); + + // Validate the normalized baseUrl + if (!validateBaseUrl(normalizedUrl)) { + const error: Error & { errorType?: string } = new Error('Invalid endpoint. Please check the Base URL.'); + error.errorType = 'endpoint'; + throw error; + } + + // Check if signal already aborted + if (signal?.aborted) { + const error: Error & { errorType?: string } = new Error('Connection timeout. The endpoint did not respond.'); + error.errorType = 'timeout'; + throw error; + } + + try { + // Create Anthropic client with SDK + const client = new Anthropic({ + apiKey, + baseURL: normalizedUrl, + timeout: 10000, // 10 seconds + maxRetries: 0, // Disable retries for immediate feedback + }); + + // Fetch models with pagination (1000 limit to get all), pass signal for cancellation + const response = await client.models.list({ limit: 1000 }, { signal: signal ?? undefined }); + + // Extract model information from SDK response + const models: ModelInfo[] = response.data + .map((model) => ({ + id: model.id || '', + display_name: model.display_name || model.id || '' + })) + .filter((model) => model.id.length > 0); + + return { models }; + } catch (error) { + // Map SDK errors to thrown errors with errorType property + // Use error.name for instanceof-like checks (works with mocks that set this.name) + const errorName = error instanceof Error ? error.name : ''; + + if (errorName === 'AuthenticationError' || error instanceof AuthenticationError) { + const authError: Error & { errorType?: string } = new Error('Authentication failed. Please check your API key.'); + authError.errorType = 'auth'; + throw authError; + } + + if (errorName === 'NotFoundError' || error instanceof NotFoundError) { + const notSupportedError: Error & { errorType?: string } = new Error('This API endpoint does not support model listing. Please enter the model name manually.'); + notSupportedError.errorType = 'not_supported'; + throw notSupportedError; + } + + if (errorName === 'APIConnectionTimeoutError' || error instanceof APIConnectionTimeoutError) { + const timeoutError: Error & { errorType?: string } = new Error('Connection timeout. The endpoint did not respond.'); + timeoutError.errorType = 'timeout'; + throw timeoutError; + } + + if (errorName === 'APIConnectionError' || error instanceof APIConnectionError) { + const networkError: Error & { errorType?: string } = new Error('Network error. Please check your internet connection.'); + networkError.errorType = 'network'; + throw networkError; + } + + // APIError or other errors + const unknownError: Error & { errorType?: string } = new Error('Connection test failed. 
Please try again.'); + unknownError.errorType = 'unknown'; + throw unknownError; + } +} diff --git a/apps/frontend/src/main/task-log-service.ts b/apps/frontend/src/main/task-log-service.ts index 9ad2569649..7752143857 100644 --- a/apps/frontend/src/main/task-log-service.ts +++ b/apps/frontend/src/main/task-log-service.ts @@ -2,6 +2,15 @@ import path from 'path'; import { existsSync, readFileSync, watchFile } from 'fs'; import { EventEmitter } from 'events'; import type { TaskLogs, TaskLogPhase, TaskLogStreamChunk, TaskPhaseLog } from '../shared/types'; +import { findTaskWorktree } from './worktree-paths'; + +function findWorktreeSpecDir(projectPath: string, specId: string, specsRelPath: string): string | null { + const worktreePath = findTaskWorktree(projectPath, specId); + if (worktreePath) { + return path.join(worktreePath, specsRelPath, specId); + } + return null; +} /** * Service for loading and watching phase-based task logs (task_logs.json) @@ -120,7 +129,7 @@ export class TaskLogService extends EventEmitter { worktreeSpecDir = watchedInfo[1].worktreeSpecDir; } else if (projectPath && specsRelPath && specId) { // Calculate worktree path from provided params - worktreeSpecDir = path.join(projectPath, '.worktrees', specId, specsRelPath, specId); + worktreeSpecDir = findWorktreeSpecDir(projectPath, specId, specsRelPath); } if (!worktreeSpecDir) { @@ -172,16 +181,22 @@ export class TaskLogService extends EventEmitter { * @param specsRelPath - Optional: Relative path to specs (e.g., "auto-claude/specs") */ startWatching(specId: string, specDir: string, projectPath?: string, specsRelPath?: string): void { - // Stop any existing watch + // Check if already watching with the same parameters (prevents rapid watch/unwatch cycles) + const existingWatch = this.watchedPaths.get(specId); + if (existingWatch && existingWatch.mainSpecDir === specDir) { + // Already watching this spec with the same spec directory - no-op + return; + } + + // Stop any existing watch (different spec dir or first time) this.stopWatching(specId); const mainLogFile = path.join(specDir, 'task_logs.json'); // Calculate worktree spec directory path if we have project info - // Worktree structure: .worktrees/{specId}/{specsRelPath}/{specId}/ let worktreeSpecDir: string | null = null; if (projectPath && specsRelPath) { - worktreeSpecDir = path.join(projectPath, '.worktrees', specId, specsRelPath, specId); + worktreeSpecDir = findWorktreeSpecDir(projectPath, specId, specsRelPath); } // Store watched paths for this specId @@ -222,10 +237,31 @@ export class TaskLogService extends EventEmitter { } // Poll for changes in both locations + // Note: worktreeSpecDir may be null initially if worktree doesn't exist yet. + // We need to dynamically re-discover it during polling. 
const pollInterval = setInterval(() => { let mainChanged = false; let worktreeChanged = false; + // Dynamically re-discover worktree if not found yet + // This handles the case where user opens logs before worktree is created + const watchedInfo = this.watchedPaths.get(specId); + let currentWorktreeSpecDir = watchedInfo?.worktreeSpecDir || null; + + if (!currentWorktreeSpecDir && projectPath && specsRelPath) { + const discoveredWorktree = findWorktreeSpecDir(projectPath, specId, specsRelPath); + if (discoveredWorktree) { + currentWorktreeSpecDir = discoveredWorktree; + // Update stored paths so future iterations don't need to re-discover + this.watchedPaths.set(specId, { + mainSpecDir: specDir, + worktreeSpecDir: discoveredWorktree, + specsRelPath: specsRelPath + }); + console.warn(`[TaskLogService] Discovered worktree for ${specId}: ${discoveredWorktree}`); + } + } + // Check main spec dir if (existsSync(mainLogFile)) { try { @@ -240,8 +276,8 @@ export class TaskLogService extends EventEmitter { } // Check worktree spec dir - if (worktreeSpecDir) { - const worktreeLogFile = path.join(worktreeSpecDir, 'task_logs.json'); + if (currentWorktreeSpecDir) { + const worktreeLogFile = path.join(currentWorktreeSpecDir, 'task_logs.json'); if (existsSync(worktreeLogFile)) { try { const currentContent = readFileSync(worktreeLogFile, 'utf-8'); diff --git a/apps/frontend/src/main/terminal-name-generator.ts b/apps/frontend/src/main/terminal-name-generator.ts index afe31de18a..d442949661 100644 --- a/apps/frontend/src/main/terminal-name-generator.ts +++ b/apps/frontend/src/main/terminal-name-generator.ts @@ -46,6 +46,23 @@ export class TerminalNameGenerator extends EventEmitter { return this.autoBuildSourcePath; } + // In packaged app, check userData override first (consistent with path-resolver.ts) + if (app.isPackaged) { + // Check for user-updated backend source first (takes priority over bundled) + const overridePath = path.join(app.getPath('userData'), 'backend-source'); + if (existsSync(overridePath) && existsSync(path.join(overridePath, 'runners', 'spec_runner.py'))) { + debug('Using user-updated backend from userData:', overridePath); + return overridePath; + } + // Fall back to bundled backend in resources + const resourcesPath = path.join(process.resourcesPath, 'backend'); + if (existsSync(resourcesPath) && existsSync(path.join(resourcesPath, 'runners', 'spec_runner.py'))) { + debug('Using bundled backend from resources:', resourcesPath); + return resourcesPath; + } + } + + // Development mode paths const possiblePaths = [ // Apps structure: from out/main -> apps/backend path.resolve(__dirname, '..', '..', '..', 'backend'), diff --git a/apps/frontend/src/main/terminal-session-store.ts b/apps/frontend/src/main/terminal-session-store.ts index b3637756da..e108173a58 100644 --- a/apps/frontend/src/main/terminal-session-store.ts +++ b/apps/frontend/src/main/terminal-session-store.ts @@ -1,6 +1,7 @@ import { app } from 'electron'; import { join } from 'path'; import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs'; +import type { TerminalWorktreeConfig } from '../shared/types'; /** * Persisted terminal session data @@ -15,6 +16,8 @@ export interface TerminalSession { outputBuffer: string; // Last 100KB of output for replay createdAt: string; // ISO timestamp lastActiveAt: string; // ISO timestamp + /** Associated worktree configuration (validated on restore) */ + worktreeConfig?: TerminalWorktreeConfig; } /** @@ -203,21 +206,47 @@ export class TerminalSessionStore { this.save(); } + 
/** + * Validate worktree config - check if the worktree still exists + * Returns undefined if worktree doesn't exist or is invalid + */ + private validateWorktreeConfig(config: TerminalWorktreeConfig | undefined): TerminalWorktreeConfig | undefined { + if (!config) return undefined; + + // Check if the worktree path still exists + if (!existsSync(config.worktreePath)) { + console.warn(`[TerminalSessionStore] Worktree path no longer exists: ${config.worktreePath}, clearing config`); + return undefined; + } + + return config; + } + /** * Get most recent sessions for a project. * First checks today, then looks at the most recent date with sessions. - * This ensures sessions survive app restarts even after midnight. + * When restoring from a previous date, MIGRATES sessions to today to prevent + * duplication issues across days. + * Validates worktree configs - clears them if worktree no longer exists. */ getSessions(projectPath: string): TerminalSession[] { + const today = getDateString(); + // First check today const todaySessions = this.getTodaysSessions(); if (todaySessions[projectPath]?.length > 0) { - return todaySessions[projectPath]; + // Validate worktree configs before returning + return todaySessions[projectPath].map(session => ({ + ...session, + worktreeConfig: this.validateWorktreeConfig(session.worktreeConfig), + })); } // If no sessions today, find the most recent date with sessions for this project const dates = Object.keys(this.data.sessionsByDate) .filter(date => { + // Exclude today since we already checked it + if (date === today) return false; const sessions = this.data.sessionsByDate[date][projectPath]; return sessions && sessions.length > 0; }) @@ -225,8 +254,34 @@ export class TerminalSessionStore { if (dates.length > 0) { const mostRecentDate = dates[0]; - console.warn(`[TerminalSessionStore] No sessions today, using sessions from ${mostRecentDate}`); - return this.data.sessionsByDate[mostRecentDate][projectPath] || []; + console.warn(`[TerminalSessionStore] No sessions today, migrating sessions from ${mostRecentDate} to today`); + const sessions = this.data.sessionsByDate[mostRecentDate][projectPath] || []; + + // MIGRATE: Copy sessions to today's bucket with validated worktree configs + const migratedSessions = sessions.map(session => ({ + ...session, + worktreeConfig: this.validateWorktreeConfig(session.worktreeConfig), + // Update lastActiveAt to now since we're restoring them + lastActiveAt: new Date().toISOString(), + })); + + // Add migrated sessions to today + todaySessions[projectPath] = migratedSessions; + + // Remove sessions from the old date to prevent duplication + delete this.data.sessionsByDate[mostRecentDate][projectPath]; + + // Clean up empty date buckets + if (Object.keys(this.data.sessionsByDate[mostRecentDate]).length === 0) { + delete this.data.sessionsByDate[mostRecentDate]; + } + + // Save the migration + this.save(); + + console.warn(`[TerminalSessionStore] Migrated ${migratedSessions.length} sessions from ${mostRecentDate} to ${today}`); + + return migratedSessions; } return []; @@ -234,11 +289,17 @@ export class TerminalSessionStore { /** * Get sessions for a specific date and project + * Validates worktree configs - clears them if worktree no longer exists. 
*/ getSessionsForDate(date: string, projectPath: string): TerminalSession[] { const dateSessions = this.data.sessionsByDate[date]; if (!dateSessions) return []; - return dateSessions[projectPath] || []; + const sessions = dateSessions[projectPath] || []; + // Validate worktree configs before returning + return sessions.map(session => ({ + ...session, + worktreeConfig: this.validateWorktreeConfig(session.worktreeConfig), + })); } /** diff --git a/apps/frontend/src/main/terminal/__tests__/claude-integration-handler.test.ts b/apps/frontend/src/main/terminal/__tests__/claude-integration-handler.test.ts new file mode 100644 index 0000000000..739b58bd4d --- /dev/null +++ b/apps/frontend/src/main/terminal/__tests__/claude-integration-handler.test.ts @@ -0,0 +1,609 @@ +import { writeFileSync } from 'fs'; +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import type * as pty from '@lydell/node-pty'; +import type { TerminalProcess } from '../types'; + +const mockGetClaudeCliInvocation = vi.fn(); +const mockGetClaudeProfileManager = vi.fn(); +const mockPersistSession = vi.fn(); +const mockReleaseSessionId = vi.fn(); + +const createMockDisposable = (): pty.IDisposable => ({ dispose: vi.fn() }); + +const createMockPty = (): pty.IPty => ({ + pid: 123, + cols: 80, + rows: 24, + process: 'bash', + handleFlowControl: false, + onData: vi.fn(() => createMockDisposable()), + onExit: vi.fn(() => createMockDisposable()), + write: vi.fn(), + resize: vi.fn(), + pause: vi.fn(), + resume: vi.fn(), + kill: vi.fn(), + clear: vi.fn(), +}); + +const createMockTerminal = (overrides: Partial<TerminalProcess> = {}): TerminalProcess => ({ + id: 'term-1', + pty: createMockPty(), + outputBuffer: '', + isClaudeMode: false, + claudeSessionId: undefined, + claudeProfileId: undefined, + title: 'Claude', + cwd: '/tmp/project', + projectPath: '/tmp/project', + ...overrides, +}); + +vi.mock('../../claude-cli-utils', () => ({ + getClaudeCliInvocation: mockGetClaudeCliInvocation, +})); + +vi.mock('../../claude-profile-manager', () => ({ + getClaudeProfileManager: mockGetClaudeProfileManager, +})); + +vi.mock('fs', async (importOriginal) => { + const actual = await importOriginal<typeof import('fs')>(); + return { + ...actual, + writeFileSync: vi.fn(), + }; +}); + +vi.mock('../session-handler', () => ({ + persistSession: mockPersistSession, + releaseSessionId: mockReleaseSessionId, +})); + +vi.mock('os', async (importOriginal) => { + const actual = await importOriginal<typeof import('os')>(); + return { + ...actual, + tmpdir: vi.fn(() => '/tmp'), + }; +}); + +describe('claude-integration-handler', () => { + beforeEach(() => { + mockGetClaudeCliInvocation.mockClear(); + mockGetClaudeProfileManager.mockClear(); + mockPersistSession.mockClear(); + mockReleaseSessionId.mockClear(); + vi.mocked(writeFileSync).mockClear(); + }); + + it('uses the resolved CLI path and PATH prefix when invoking Claude', async () => { + mockGetClaudeCliInvocation.mockReturnValue({ + command: "/opt/claude bin/claude's", + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + const profileManager = { + getActiveProfile: vi.fn(() => ({ id: 'default', name: 'Default', isDefault: true })), + getProfile: vi.fn(), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal(); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', undefined, () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + 
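// Rough shape asserted below: cd '<project>' && PATH='<cli dir>:...' '<shell-escaped claude path>'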
expect(written).toContain("cd '/tmp/project' && "); + expect(written).toContain("PATH='/opt/claude/bin:/usr/bin' "); + expect(written).toContain("'/opt/claude bin/claude'\\''s'"); + expect(mockReleaseSessionId).toHaveBeenCalledWith('term-1'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + expect(profileManager.getActiveProfile).toHaveBeenCalled(); + expect(profileManager.markProfileUsed).toHaveBeenCalledWith('default'); + }); + + it('converts Windows PATH separators to colons for bash invocations', async () => { + const originalPlatform = Object.getOwnPropertyDescriptor(process, 'platform'); + Object.defineProperty(process, 'platform', { value: 'win32' }); + + try { + mockGetClaudeCliInvocation.mockReturnValue({ + command: 'C:\\Tools\\claude\\claude.exe', + env: { PATH: 'C:\\Tools\\claude;C:\\Windows' }, + }); + const profileManager = { + getActiveProfile: vi.fn(() => ({ id: 'default', name: 'Default', isDefault: true })), + getProfile: vi.fn(), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal(); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', undefined, () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain("PATH='C:\\Tools\\claude:C:\\Windows' "); + expect(written).not.toContain('C:\\Tools\\claude;C:\\Windows'); + } finally { + if (originalPlatform) { + Object.defineProperty(process, 'platform', originalPlatform); + } + } + }); + + it('throws when invokeClaude cannot resolve the CLI invocation', async () => { + mockGetClaudeCliInvocation.mockImplementation(() => { + throw new Error('boom'); + }); + const profileManager = { + getActiveProfile: vi.fn(() => ({ id: 'default', name: 'Default', isDefault: true })), + getProfile: vi.fn(), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-err' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + expect(() => invokeClaude(terminal, '/tmp/project', undefined, () => null, vi.fn())).toThrow('boom'); + expect(mockReleaseSessionId).toHaveBeenCalledWith('term-err'); + expect(terminal.pty.write).not.toHaveBeenCalled(); + }); + + it('throws when resumeClaude cannot resolve the CLI invocation', async () => { + mockGetClaudeCliInvocation.mockImplementation(() => { + throw new Error('boom'); + }); + + const terminal = createMockTerminal({ + id: 'term-err-2', + cwd: undefined, + projectPath: '/tmp/project', + }); + + const { resumeClaude } = await import('../claude-integration-handler'); + expect(() => resumeClaude(terminal, 'abc123', () => null)).toThrow('boom'); + expect(terminal.pty.write).not.toHaveBeenCalled(); + }); + + it('throws when writing the OAuth token temp file fails', async () => { + mockGetClaudeCliInvocation.mockReturnValue({ + command: '/opt/claude/bin/claude', + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-err', + name: 'Work', + isDefault: false, + oauthToken: 'token-value', + })), + getProfileToken: vi.fn(() => 'token-value'), + markProfileUsed: vi.fn(), + }; + mockGetClaudeProfileManager.mockReturnValue(profileManager); + vi.mocked(writeFileSync).mockImplementationOnce(() => { + throw new 
Error('disk full'); + }); + + const terminal = createMockTerminal({ id: 'term-err-3' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + expect(() => invokeClaude(terminal, '/tmp/project', 'prof-err', () => null, vi.fn())).toThrow('disk full'); + expect(terminal.pty.write).not.toHaveBeenCalled(); + }); + + it('uses the temp token flow when the active profile has an oauth token', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-1', + name: 'Work', + isDefault: false, + oauthToken: 'token-value', + })), + getProfileToken: vi.fn(() => 'token-value'), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1234); + + const terminal = createMockTerminal({ id: 'term-3' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-1', () => null, vi.fn()); + + const tokenPath = vi.mocked(writeFileSync).mock.calls[0]?.[0] as string; + const tokenContents = vi.mocked(writeFileSync).mock.calls[0]?.[1] as string; + expect(tokenPath).toMatch(/^\/tmp\/\.claude-token-1234-[0-9a-f]{16}$/); + expect(tokenContents).toBe("export CLAUDE_CODE_OAUTH_TOKEN='token-value'\n"); + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain("HISTFILE= HISTCONTROL=ignorespace "); + expect(written).toContain(`source '${tokenPath}'`); + expect(written).toContain(`rm -f '${tokenPath}'`); + expect(written).toContain(`exec '${command}'`); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-1'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + + nowSpy.mockRestore(); + }); + + it('prefers the temp token flow when profile has both oauth token and config dir', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-both', + name: 'Work', + isDefault: false, + oauthToken: 'token-value', + configDir: '/tmp/claude-config', + })), + getProfileToken: vi.fn(() => 'token-value'), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(5678); + + const terminal = createMockTerminal({ id: 'term-both' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-both', () => null, vi.fn()); + + const tokenPath = vi.mocked(writeFileSync).mock.calls[0]?.[0] as string; + const tokenContents = vi.mocked(writeFileSync).mock.calls[0]?.[1] as string; + expect(tokenPath).toMatch(/^\/tmp\/\.claude-token-5678-[0-9a-f]{16}$/); + expect(tokenContents).toBe("export CLAUDE_CODE_OAUTH_TOKEN='token-value'\n"); + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain(`source '${tokenPath}'`); + expect(written).toContain(`rm -f '${tokenPath}'`); + expect(written).toContain(`exec '${command}'`); + expect(written).not.toContain('CLAUDE_CONFIG_DIR='); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-both'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); 
+ expect(profileManager.markProfileUsed).toHaveBeenCalledWith('prof-both'); + + nowSpy.mockRestore(); + }); + + it('handles missing profiles by falling back to the default command', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => undefined), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-6' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'missing', () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain(`'${command}'`); + expect(profileManager.getProfile).toHaveBeenCalledWith('missing'); + expect(profileManager.markProfileUsed).not.toHaveBeenCalled(); + }); + + it('uses the config dir flow when the active profile has a config dir', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-2', + name: 'Work', + isDefault: false, + configDir: '/tmp/claude-config', + })), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-4' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-2', () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain("HISTFILE= HISTCONTROL=ignorespace "); + expect(written).toContain("CLAUDE_CONFIG_DIR='/tmp/claude-config'"); + expect(written).toContain(`exec '${command}'`); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-2'); + expect(profileManager.markProfileUsed).toHaveBeenCalledWith('prof-2'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + }); + + it('uses profile switching when a non-default profile is requested', async () => { + const command = '/opt/claude/bin/claude'; + const profileManager = { + getActiveProfile: vi.fn(), + getProfile: vi.fn(() => ({ + id: 'prof-3', + name: 'Team', + isDefault: false, + })), + getProfileToken: vi.fn(() => null), + markProfileUsed: vi.fn(), + }; + + mockGetClaudeCliInvocation.mockReturnValue({ + command, + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + mockGetClaudeProfileManager.mockReturnValue(profileManager); + + const terminal = createMockTerminal({ id: 'term-5' }); + + const { invokeClaude } = await import('../claude-integration-handler'); + invokeClaude(terminal, '/tmp/project', 'prof-3', () => null, vi.fn()); + + const written = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(written).toContain(`'${command}'`); + expect(written).toContain("PATH='/opt/claude/bin:/usr/bin' "); + expect(profileManager.getProfile).toHaveBeenCalledWith('prof-3'); + expect(profileManager.markProfileUsed).toHaveBeenCalledWith('prof-3'); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + }); + + it('uses --continue regardless of sessionId (sessionId is deprecated)', async () => { + mockGetClaudeCliInvocation.mockReturnValue({ + 
command: '/opt/claude/bin/claude', + env: { PATH: '/opt/claude/bin:/usr/bin' }, + }); + + const terminal = createMockTerminal({ + id: 'term-2', + cwd: undefined, + projectPath: '/tmp/project', + }); + + const { resumeClaude } = await import('../claude-integration-handler'); + + // Even when sessionId is passed, it should be ignored and --continue used + resumeClaude(terminal, 'abc123', () => null); + + const resumeCall = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(resumeCall).toContain("PATH='/opt/claude/bin:/usr/bin' "); + expect(resumeCall).toContain("'/opt/claude/bin/claude' --continue"); + expect(resumeCall).not.toContain('--resume'); + // sessionId is cleared because --continue doesn't track specific sessions + expect(terminal.claudeSessionId).toBeUndefined(); + expect(terminal.isClaudeMode).toBe(true); + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + + vi.mocked(terminal.pty.write).mockClear(); + mockPersistSession.mockClear(); + terminal.projectPath = undefined; + terminal.isClaudeMode = false; + resumeClaude(terminal, undefined, () => null); + const continueCall = vi.mocked(terminal.pty.write).mock.calls[0][0] as string; + expect(continueCall).toContain("'/opt/claude/bin/claude' --continue"); + expect(terminal.isClaudeMode).toBe(true); + expect(terminal.claudeSessionId).toBeUndefined(); + expect(mockPersistSession).not.toHaveBeenCalled(); + }); +}); + +/** + * Unit tests for helper functions + */ +describe('claude-integration-handler - Helper Functions', () => { + describe('buildClaudeShellCommand', () => { + it('should build default command without cwd or PATH prefix', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand('', '', "'/opt/bin/claude'", { method: 'default' }); + + expect(result).toBe("'/opt/bin/claude'\r"); + }); + + it('should build command with cwd', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand("cd '/tmp/project' && ", '', "'/opt/bin/claude'", { method: 'default' }); + + expect(result).toBe("cd '/tmp/project' && '/opt/bin/claude'\r"); + }); + + it('should build command with PATH prefix', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand('', "PATH='/custom/path' ", "'/opt/bin/claude'", { method: 'default' }); + + expect(result).toBe("PATH='/custom/path' '/opt/bin/claude'\r"); + }); + + it('should build temp-file method command with history-safe prefixes', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand( + "cd '/tmp/project' && ", + "PATH='/opt/bin' ", + "'/opt/bin/claude'", + { method: 'temp-file', escapedTempFile: "'/tmp/.token-123'" } + ); + + expect(result).toContain('clear && '); + expect(result).toContain("cd '/tmp/project' && "); + expect(result).toContain('HISTFILE= HISTCONTROL=ignorespace'); + expect(result).toContain("PATH='/opt/bin' "); + expect(result).toContain("source '/tmp/.token-123'"); + expect(result).toContain("rm -f '/tmp/.token-123'"); + expect(result).toContain("exec '/opt/bin/claude'"); + }); + + it('should build config-dir method command with CLAUDE_CONFIG_DIR', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand( + "cd '/tmp/project' && ", + "PATH='/opt/bin' ", + 
"'/opt/bin/claude'", + { method: 'config-dir', escapedConfigDir: "'/home/user/.claude-work'" } + ); + + expect(result).toContain('clear && '); + expect(result).toContain("cd '/tmp/project' && "); + expect(result).toContain('HISTFILE= HISTCONTROL=ignorespace'); + expect(result).toContain("CLAUDE_CONFIG_DIR='/home/user/.claude-work'"); + expect(result).toContain("PATH='/opt/bin' "); + expect(result).toContain("exec '/opt/bin/claude'"); + }); + + it('should handle empty cwdCommand for temp-file method', async () => { + const { buildClaudeShellCommand } = await import('../claude-integration-handler'); + const result = buildClaudeShellCommand( + '', + '', + "'/opt/bin/claude'", + { method: 'temp-file', escapedTempFile: "'/tmp/.token'" } + ); + + expect(result).toContain('clear && '); + expect(result).toContain('HISTFILE= HISTCONTROL=ignorespace'); + expect(result).not.toContain('cd '); + expect(result).toContain("source '/tmp/.token'"); + }); + }); + + describe('finalizeClaudeInvoke', () => { + it('should set terminal title to "Claude" for default profile', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockWindow = { + webContents: { send: vi.fn() } + }; + + finalizeClaudeInvoke( + terminal, + { name: 'Default', isDefault: true }, + '/tmp/project', + Date.now(), + () => mockWindow as any, + vi.fn() + ); + + expect(terminal.title).toBe('Claude'); + }); + + it('should set terminal title to "Claude (ProfileName)" for non-default profile', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockWindow = { + webContents: { send: vi.fn() } + }; + + finalizeClaudeInvoke( + terminal, + { name: 'Work Profile', isDefault: false }, + '/tmp/project', + Date.now(), + () => mockWindow as any, + vi.fn() + ); + + expect(terminal.title).toBe('Claude (Work Profile)'); + }); + + it('should send IPC message to renderer', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockSend = vi.fn(); + const mockWindow = { + webContents: { send: mockSend } + }; + + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + Date.now(), + () => mockWindow as any, + vi.fn() + ); + + expect(mockSend).toHaveBeenCalledWith( + expect.stringContaining('title'), + terminal.id, + 'Claude' + ); + }); + + it('should persist session when terminal has projectPath', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal({ projectPath: '/tmp/project' }); + + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + Date.now(), + () => null, + vi.fn() + ); + + expect(mockPersistSession).toHaveBeenCalledWith(terminal); + }); + + it('should call onSessionCapture when projectPath is provided', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + const terminal = createMockTerminal(); + const mockOnSessionCapture = vi.fn(); + const startTime = Date.now(); + + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + startTime, + () => null, + mockOnSessionCapture + ); + + expect(mockOnSessionCapture).toHaveBeenCalledWith(terminal.id, '/tmp/project', startTime); + }); + + it('should not crash when getWindow returns null', async () => { + const { finalizeClaudeInvoke } = await import('../claude-integration-handler'); + 
const terminal = createMockTerminal(); + + expect(() => { + finalizeClaudeInvoke( + terminal, + undefined, + '/tmp/project', + Date.now(), + () => null, + vi.fn() + ); + }).not.toThrow(); + }); + }); +}); diff --git a/apps/frontend/src/main/terminal/claude-integration-handler.ts b/apps/frontend/src/main/terminal/claude-integration-handler.ts index ae761772bf..ae420b2d97 100644 --- a/apps/frontend/src/main/terminal/claude-integration-handler.ts +++ b/apps/frontend/src/main/terminal/claude-integration-handler.ts @@ -5,13 +5,16 @@ import * as os from 'os'; import * as fs from 'fs'; +import { promises as fsPromises } from 'fs'; import * as path from 'path'; +import * as crypto from 'crypto'; import { IPC_CHANNELS } from '../../shared/constants'; -import { getClaudeProfileManager } from '../claude-profile-manager'; +import { getClaudeProfileManager, initializeClaudeProfileManager } from '../claude-profile-manager'; import * as OutputParser from './output-parser'; import * as SessionHandler from './session-handler'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; import { escapeShellArg, buildCdCommand } from '../../shared/utils/shell-escape'; +import { getClaudeCliInvocation, getClaudeCliInvocationAsync } from '../claude-cli-utils'; import type { TerminalProcess, WindowGetter, @@ -19,6 +22,137 @@ import type { OAuthTokenEvent } from './types'; +function normalizePathForBash(envPath: string): string { + return process.platform === 'win32' ? envPath.replace(/;/g, ':') : envPath; +} + +// ============================================================================ +// SHARED HELPERS - Used by both sync and async invokeClaude +// ============================================================================ + +/** + * Configuration for building Claude shell commands using discriminated union. + * This provides type safety by ensuring the correct options are provided for each method. + */ +type ClaudeCommandConfig = + | { method: 'default' } + | { method: 'temp-file'; escapedTempFile: string } + | { method: 'config-dir'; escapedConfigDir: string }; + +/** + * Build the shell command for invoking Claude CLI. + * + * Generates the appropriate command string based on the invocation method: + * - 'default': Simple command execution + * - 'temp-file': Sources OAuth token from temp file, then removes it + * - 'config-dir': Sets CLAUDE_CONFIG_DIR for custom profile location + * + * All non-default methods include history-safe prefixes (HISTFILE=, HISTCONTROL=) + * to prevent sensitive data from appearing in shell history. 
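For orientation, a minimal usage sketch of the discriminated union (argument values are illustrative and pre-escaped; the relative import path assumes a sibling module under src/main/terminal):

```ts
import { buildClaudeShellCommand } from './claude-integration-handler';

const plain = buildClaudeShellCommand('cd /tmp && ', '', "'claude'", { method: 'default' });
// => "cd /tmp && 'claude'\r"

const withConfigDir = buildClaudeShellCommand('', '', "'claude'", {
  method: 'config-dir',
  escapedConfigDir: "'/home/user/.claude-work'"
});
// => clear && HISTFILE= HISTCONTROL=ignorespace CLAUDE_CONFIG_DIR='/home/user/.claude-work' bash -c "exec 'claude'" (plus trailing \r)

// The compiler rejects a temp-file config that omits escapedTempFile:
// buildClaudeShellCommand('', '', "'claude'", { method: 'temp-file' }); // type error
```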
+ * + * @param cwdCommand - Command to change directory (empty string if no change needed) + * @param pathPrefix - PATH prefix for Claude CLI (empty string if not needed) + * @param escapedClaudeCmd - Shell-escaped Claude CLI command + * @param config - Configuration object with method and required options (discriminated union) + * @returns Complete shell command string ready for terminal.pty.write() + * + * @example + * // Default method + * buildClaudeShellCommand('cd /path && ', 'PATH=/bin ', 'claude', { method: 'default' }); + * // Returns: 'cd /path && PATH=/bin claude\r' + * + * // Temp file method + * buildClaudeShellCommand('', '', 'claude', { method: 'temp-file', escapedTempFile: '/tmp/token' }); + * // Returns: 'clear && HISTFILE= HISTCONTROL=ignorespace bash -c "source /tmp/token && rm -f /tmp/token && exec claude"\r' + */ +export function buildClaudeShellCommand( + cwdCommand: string, + pathPrefix: string, + escapedClaudeCmd: string, + config: ClaudeCommandConfig +): string { + switch (config.method) { + case 'temp-file': + return `clear && ${cwdCommand}HISTFILE= HISTCONTROL=ignorespace ${pathPrefix}bash -c "source ${config.escapedTempFile} && rm -f ${config.escapedTempFile} && exec ${escapedClaudeCmd}"\r`; + + case 'config-dir': + return `clear && ${cwdCommand}HISTFILE= HISTCONTROL=ignorespace CLAUDE_CONFIG_DIR=${config.escapedConfigDir} ${pathPrefix}bash -c "exec ${escapedClaudeCmd}"\r`; + + default: + return `${cwdCommand}${pathPrefix}${escapedClaudeCmd}\r`; + } +} + +/** + * Profile information for terminal title generation + */ +interface ProfileInfo { + /** Profile name for display */ + name?: string; + /** Whether this is the default profile */ + isDefault?: boolean; +} + +/** + * Callback type for session capture + */ +type SessionCaptureCallback = (terminalId: string, projectPath: string, startTime: number) => void; + +/** + * Finalize terminal state after invoking Claude. + * + * Updates terminal title, sends IPC notification to renderer, persists session, + * and calls the session capture callback. This consolidates the post-invocation + * logic used by both sync and async invoke methods. + * + * @param terminal - The terminal process to update + * @param activeProfile - The profile being used (or undefined for default) + * @param projectPath - The project path (for session capture) + * @param startTime - Timestamp when invocation started + * @param getWindow - Function to get the BrowserWindow + * @param onSessionCapture - Callback for session capture + * + * @example + * finalizeClaudeInvoke( + * terminal, + * { name: 'Work', isDefault: false }, + * '/path/to/project', + * Date.now(), + * () => mainWindow, + * (id, path, time) => console.log('Session captured') + * ); + */ +export function finalizeClaudeInvoke( + terminal: TerminalProcess, + activeProfile: ProfileInfo | undefined, + projectPath: string | undefined, + startTime: number, + getWindow: WindowGetter, + onSessionCapture: SessionCaptureCallback +): void { + // Set terminal title based on profile + const title = activeProfile && !activeProfile.isDefault + ? 
`Claude (${activeProfile.name})` + : 'Claude'; + terminal.title = title; + + // Notify renderer of title change + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, title); + } + + // Persist session if project path is available + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } + + // Call session capture callback if project path provided + if (projectPath) { + onSessionCapture(terminal.id, projectPath, startTime); + } +} + /** * Handle rate limit detection and profile switching */ @@ -211,6 +345,7 @@ export function invokeClaude( debugLog('[ClaudeIntegration:invokeClaude] CWD:', cwd); terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); terminal.claudeSessionId = undefined; const startTime = Date.now(); @@ -232,8 +367,12 @@ export function invokeClaude( isDefault: activeProfile?.isDefault }); - // Use safe shell escaping to prevent command injection const cwdCommand = buildCdCommand(cwd); + const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; const needsEnvOverride = profileId && profileId !== previousProfileId; debugLog('[ClaudeIntegration:invokeClaude] Environment override check:', { @@ -250,30 +389,30 @@ export function invokeClaude( }); if (token) { - const tempFile = path.join(os.tmpdir(), `.claude-token-${Date.now()}`); + const nonce = crypto.randomBytes(8).toString('hex'); + const tempFile = path.join(os.tmpdir(), `.claude-token-${Date.now()}-${nonce}`); + const escapedTempFile = escapeShellArg(tempFile); debugLog('[ClaudeIntegration:invokeClaude] Writing token to temp file:', tempFile); - fs.writeFileSync(tempFile, `export CLAUDE_CODE_OAUTH_TOKEN="${token}"\n`, { mode: 0o600 }); - - // Clear terminal and run command without adding to shell history: - // - HISTFILE= disables history file writing for the current command - // - HISTCONTROL=ignorespace causes commands starting with space to be ignored - // - Leading space ensures the command is ignored even if HISTCONTROL was already set - // - Uses subshell (...) 
to isolate environment changes - // This prevents temp file paths from appearing in shell history - const command = `clear && ${cwdCommand} HISTFILE= HISTCONTROL=ignorespace bash -c 'source "${tempFile}" && rm -f "${tempFile}" && exec claude'\r`; + fs.writeFileSync( + tempFile, + `export CLAUDE_CODE_OAUTH_TOKEN=${escapeShellArg(token)}\n`, + { mode: 0o600 } + ); + + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'temp-file', escapedTempFile }); debugLog('[ClaudeIntegration:invokeClaude] Executing command (temp file method, history-safe)'); terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (temp file) =========='); return; } else if (activeProfile.configDir) { - // Clear terminal and run command without adding to shell history: - // Same history-disabling technique as temp file method above - // SECURITY: Use escapeShellArg for configDir to prevent command injection - // Set CLAUDE_CONFIG_DIR as env var before bash -c to avoid embedding user input in the command string const escapedConfigDir = escapeShellArg(activeProfile.configDir); - const command = `clear && ${cwdCommand}HISTFILE= HISTCONTROL=ignorespace CLAUDE_CONFIG_DIR=${escapedConfigDir} bash -c 'exec claude'\r`; + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'config-dir', escapedConfigDir }); debugLog('[ClaudeIntegration:invokeClaude] Executing command (configDir method, history-safe)'); terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (configDir) =========='); return; } else { @@ -285,7 +424,7 @@ export function invokeClaude( debugLog('[ClaudeIntegration:invokeClaude] Using terminal environment for non-default profile:', activeProfile.name); } - const command = `${cwdCommand}claude\r`; + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'default' }); debugLog('[ClaudeIntegration:invokeClaude] Executing command (default method):', command); terminal.pty.write(command); @@ -293,50 +432,228 @@ export function invokeClaude( profileManager.markProfileUsed(activeProfile.id); } + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (default) =========='); +} + +/** + * Resume Claude session in the current directory + * + * Uses `claude --continue` which resumes the most recent conversation in the + * current directory. This is simpler and more reliable than tracking session IDs, + * since Auto Claude already restores terminals to their correct cwd/projectPath. + * + * Note: The sessionId parameter is kept for backwards compatibility but is ignored. + * Claude Code's --resume flag expects user-named sessions (set via /rename), not + * internal session file IDs. 
+ */ +export function resumeClaude( + terminal: TerminalProcess, + _sessionId: string | undefined, + getWindow: WindowGetter +): void { + terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); + + const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; + + // Always use --continue which resumes the most recent session in the current directory. + // This is more reliable than --resume with session IDs since Auto Claude already restores + // terminals to their correct cwd/projectPath. + // + // Note: We clear claudeSessionId because --continue doesn't track specific sessions, + // and we don't want stale IDs persisting through SessionHandler.persistSession(). + terminal.claudeSessionId = undefined; + + // Deprecation warning for callers still passing sessionId + if (_sessionId) { + console.warn('[ClaudeIntegration:resumeClaude] sessionId parameter is deprecated and ignored; using claude --continue instead'); + } + + const command = `${pathPrefix}${escapedClaudeCmd} --continue`; + + terminal.pty.write(`${command}\r`); + + // Update terminal title in main process and notify renderer + terminal.title = 'Claude'; const win = getWindow(); if (win) { - const title = activeProfile && !activeProfile.isDefault - ? `Claude (${activeProfile.name})` - : 'Claude'; - win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, title); + win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, 'Claude'); } + // Persist session with updated title if (terminal.projectPath) { SessionHandler.persistSession(terminal); } +} - if (projectPath) { - onSessionCapture(terminal.id, projectPath, startTime); +// ============================================================================ +// ASYNC VERSIONS - Non-blocking alternatives for Electron main process +// ============================================================================ + +/** + * Invoke Claude asynchronously (non-blocking) + * + * Safe to call from Electron main process without blocking the event loop. + * Uses async CLI detection which doesn't block on subprocess calls. + */ +export async function invokeClaudeAsync( + terminal: TerminalProcess, + cwd: string | undefined, + profileId: string | undefined, + getWindow: WindowGetter, + onSessionCapture: (terminalId: string, projectPath: string, startTime: number) => void +): Promise { + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE START (async) =========='); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Terminal ID:', terminal.id); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Requested profile ID:', profileId); + debugLog('[ClaudeIntegration:invokeClaudeAsync] CWD:', cwd); + + terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); + terminal.claudeSessionId = undefined; + + const startTime = Date.now(); + const projectPath = cwd || terminal.projectPath || terminal.cwd; + + // Ensure profile manager is initialized (async, yields to event loop) + const profileManager = await initializeClaudeProfileManager(); + const activeProfile = profileId + ? 
profileManager.getProfile(profileId) + : profileManager.getActiveProfile(); + + const previousProfileId = terminal.claudeProfileId; + terminal.claudeProfileId = activeProfile?.id; + + debugLog('[ClaudeIntegration:invokeClaudeAsync] Profile resolution:', { + previousProfileId, + newProfileId: activeProfile?.id, + profileName: activeProfile?.name, + hasOAuthToken: !!activeProfile?.oauthToken, + isDefault: activeProfile?.isDefault + }); + + // Async CLI invocation - non-blocking + const cwdCommand = buildCdCommand(cwd); + const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; + const needsEnvOverride = profileId && profileId !== previousProfileId; + + debugLog('[ClaudeIntegration:invokeClaudeAsync] Environment override check:', { + profileIdProvided: !!profileId, + previousProfileId, + needsEnvOverride + }); + + if (needsEnvOverride && activeProfile && !activeProfile.isDefault) { + const token = profileManager.getProfileToken(activeProfile.id); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Token retrieval:', { + hasToken: !!token, + tokenLength: token?.length + }); + + if (token) { + const nonce = crypto.randomBytes(8).toString('hex'); + const tempFile = path.join(os.tmpdir(), `.claude-token-${Date.now()}-${nonce}`); + const escapedTempFile = escapeShellArg(tempFile); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Writing token to temp file:', tempFile); + await fsPromises.writeFile( + tempFile, + `export CLAUDE_CODE_OAUTH_TOKEN=${escapeShellArg(token)}\n`, + { mode: 0o600 } + ); + + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'temp-file', escapedTempFile }); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Executing command (temp file method, history-safe)'); + terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE COMPLETE (temp file) =========='); + return; + } else if (activeProfile.configDir) { + const escapedConfigDir = escapeShellArg(activeProfile.configDir); + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'config-dir', escapedConfigDir }); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Executing command (configDir method, history-safe)'); + terminal.pty.write(command); + profileManager.markProfileUsed(activeProfile.id); + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE COMPLETE (configDir) =========='); + return; + } else { + debugLog('[ClaudeIntegration:invokeClaudeAsync] WARNING: No token or configDir available for non-default profile'); + } } - debugLog('[ClaudeIntegration:invokeClaude] ========== INVOKE CLAUDE COMPLETE (default) =========='); + if (activeProfile && !activeProfile.isDefault) { + debugLog('[ClaudeIntegration:invokeClaudeAsync] Using terminal environment for non-default profile:', activeProfile.name); + } + + const command = buildClaudeShellCommand(cwdCommand, pathPrefix, escapedClaudeCmd, { method: 'default' }); + debugLog('[ClaudeIntegration:invokeClaudeAsync] Executing command (default method):', command); + terminal.pty.write(command); + + if 
(activeProfile) { + profileManager.markProfileUsed(activeProfile.id); + } + + finalizeClaudeInvoke(terminal, activeProfile, projectPath, startTime, getWindow, onSessionCapture); + debugLog('[ClaudeIntegration:invokeClaudeAsync] ========== INVOKE CLAUDE COMPLETE (default) =========='); } /** - * Resume Claude with optional session ID + * Resume Claude asynchronously (non-blocking) + * + * Safe to call from Electron main process without blocking the event loop. + * Uses async CLI detection which doesn't block on subprocess calls. */ -export function resumeClaude( +export async function resumeClaudeAsync( terminal: TerminalProcess, sessionId: string | undefined, getWindow: WindowGetter -): void { +): Promise { terminal.isClaudeMode = true; + SessionHandler.releaseSessionId(terminal.id); + + // Async CLI invocation - non-blocking + const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync(); + const escapedClaudeCmd = escapeShellArg(claudeCmd); + const pathPrefix = claudeEnv.PATH + ? `PATH=${escapeShellArg(normalizePathForBash(claudeEnv.PATH))} ` + : ''; + + // Always use --continue which resumes the most recent session in the current directory. + // This is more reliable than --resume with session IDs since Auto Claude already restores + // terminals to their correct cwd/projectPath. + // + // Note: We clear claudeSessionId because --continue doesn't track specific sessions, + // and we don't want stale IDs persisting through SessionHandler.persistSession(). + terminal.claudeSessionId = undefined; - let command: string; + // Deprecation warning for callers still passing sessionId if (sessionId) { - // SECURITY: Escape sessionId to prevent command injection - command = `claude --resume ${escapeShellArg(sessionId)}`; - terminal.claudeSessionId = sessionId; - } else { - command = 'claude --continue'; + console.warn('[ClaudeIntegration:resumeClaudeAsync] sessionId parameter is deprecated and ignored; using claude --continue instead'); } + const command = `${pathPrefix}${escapedClaudeCmd} --continue`; + terminal.pty.write(`${command}\r`); + terminal.title = 'Claude'; const win = getWindow(); if (win) { win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, terminal.id, 'Claude'); } + + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } } /** @@ -443,7 +760,7 @@ export async function switchClaudeProfile( terminal: TerminalProcess, profileId: string, getWindow: WindowGetter, - invokeClaudeCallback: (terminalId: string, cwd: string | undefined, profileId: string) => void, + invokeClaudeCallback: (terminalId: string, cwd: string | undefined, profileId: string) => Promise, clearRateLimitCallback: (terminalId: string) => void ): Promise<{ success: boolean; error?: string }> { // Always-on tracing @@ -461,7 +778,8 @@ export async function switchClaudeProfile( cwd: terminal.cwd }); - const profileManager = getClaudeProfileManager(); + // Ensure profile manager is initialized (async, yields to event loop) + const profileManager = await initializeClaudeProfileManager(); const profile = profileManager.getProfile(profileId); console.warn('[ClaudeIntegration:switchClaudeProfile] Profile found:', profile?.name || 'NOT FOUND'); @@ -529,7 +847,7 @@ export async function switchClaudeProfile( projectPath, profileId }); - invokeClaudeCallback(terminal.id, projectPath, profileId); + await invokeClaudeCallback(terminal.id, projectPath, profileId); debugLog('[ClaudeIntegration:switchClaudeProfile] Setting active profile in profile manager'); 
profileManager.setActiveProfile(profileId); diff --git a/apps/frontend/src/main/terminal/output-parser.ts b/apps/frontend/src/main/terminal/output-parser.ts index 72458ef254..e955935aaa 100644 --- a/apps/frontend/src/main/terminal/output-parser.ts +++ b/apps/frontend/src/main/terminal/output-parser.ts @@ -79,3 +79,83 @@ export function hasRateLimitMessage(data: string): boolean { export function hasOAuthToken(data: string): boolean { return OAUTH_TOKEN_PATTERN.test(data); } + +/** + * Patterns indicating Claude Code is busy/processing + * These appear when Claude is actively thinking or working + * + * IMPORTANT: These must be universal patterns that work for ALL users, + * not just custom terminal configurations with progress bars. + */ +const CLAUDE_BUSY_PATTERNS = [ + // Universal Claude Code indicators + /^โ—/m, // Claude's response bullet point (appears when Claude is responding) + /\u25cf/, // Unicode bullet point (โ—) + + // Tool execution indicators (Claude is running tools) + /^(Read|Write|Edit|Bash|Grep|Glob|Task|WebFetch|WebSearch|TodoWrite)\(/m, + /^\s*\d+\s*[โ”‚|]\s*/m, // Line numbers in file output (Claude reading/showing files) + + // Streaming/thinking indicators + /Loading\.\.\./i, + /Thinking\.\.\./i, + /Analyzing\.\.\./i, + /Processing\.\.\./i, + /Working\.\.\./i, + /Searching\.\.\./i, + /Creating\.\.\./i, + /Updating\.\.\./i, + /Running\.\.\./i, + + // Custom progress bar patterns (for users who have them) + /\[Opus\s*\d*\.?\d*\].*\d+%/i, // Opus model progress + /\[Sonnet\s*\d*\.?\d*\].*\d+%/i, // Sonnet model progress + /\[Haiku\s*\d*\.?\d*\].*\d+%/i, // Haiku model progress + /\[Claude\s*\d*\.?\d*\].*\d+%/i, // Generic Claude progress + /โ–‘+/, // Progress bar characters + /โ–“+/, // Progress bar characters + /โ–ˆ+/, // Progress bar characters (filled) +]; + +/** + * Patterns indicating Claude Code is idle/ready for input + * The prompt character at the start of a line indicates Claude is waiting + */ +const CLAUDE_IDLE_PATTERNS = [ + /^>\s*$/m, // Just "> " prompt on its own line + /\n>\s*$/, // "> " at end after newline + /^\s*>\s+$/m, // "> " with possible whitespace +]; + +/** + * Check if output indicates Claude is busy (processing) + */ +export function isClaudeBusyOutput(data: string): boolean { + return CLAUDE_BUSY_PATTERNS.some(pattern => pattern.test(data)); +} + +/** + * Check if output indicates Claude is idle (ready for input) + */ +export function isClaudeIdleOutput(data: string): boolean { + return CLAUDE_IDLE_PATTERNS.some(pattern => pattern.test(data)); +} + +/** + * Determine Claude busy state from output + * Returns: 'busy' | 'idle' | null (no change detected) + */ +export function detectClaudeBusyState(data: string): 'busy' | 'idle' | null { + // Check for busy indicators FIRST - they're more definitive + // Progress bars and "Loading..." 
mean Claude is definitely working, + // even if there's a ">" prompt visible elsewhere in the output + if (isClaudeBusyOutput(data)) { + return 'busy'; + } + // Only check for idle if no busy indicators found + // The ">" prompt alone at end of output means Claude is waiting for input + if (isClaudeIdleOutput(data)) { + return 'idle'; + } + return null; +} diff --git a/apps/frontend/src/main/terminal/pty-manager.ts b/apps/frontend/src/main/terminal/pty-manager.ts index d118dca73c..bd38c07a5c 100644 --- a/apps/frontend/src/main/terminal/pty-manager.ts +++ b/apps/frontend/src/main/terminal/pty-manager.ts @@ -5,9 +5,65 @@ import * as pty from '@lydell/node-pty'; import * as os from 'os'; +import { existsSync } from 'fs'; import type { TerminalProcess, WindowGetter } from './types'; import { IPC_CHANNELS } from '../../shared/constants'; import { getClaudeProfileManager } from '../claude-profile-manager'; +import { readSettingsFile } from '../settings-utils'; +import type { SupportedTerminal } from '../../shared/types/settings'; + +/** + * Windows shell paths for different terminal preferences + */ +const WINDOWS_SHELL_PATHS: Record = { + powershell: [ + 'C:\\Program Files\\PowerShell\\7\\pwsh.exe', // PowerShell 7 (Core) + 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe', // Windows PowerShell 5.1 + ], + windowsterminal: [ + 'C:\\Program Files\\PowerShell\\7\\pwsh.exe', // Prefer PowerShell Core in Windows Terminal + 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe', + ], + cmd: [ + 'C:\\Windows\\System32\\cmd.exe', + ], + gitbash: [ + 'C:\\Program Files\\Git\\bin\\bash.exe', + 'C:\\Program Files (x86)\\Git\\bin\\bash.exe', + ], + cygwin: [ + 'C:\\cygwin64\\bin\\bash.exe', + 'C:\\cygwin\\bin\\bash.exe', + ], + msys2: [ + 'C:\\msys64\\usr\\bin\\bash.exe', + 'C:\\msys32\\usr\\bin\\bash.exe', + ], +}; + +/** + * Get the Windows shell executable based on preferred terminal setting + */ +function getWindowsShell(preferredTerminal: SupportedTerminal | undefined): string { + // If no preference or 'system', use COMSPEC (usually cmd.exe) + if (!preferredTerminal || preferredTerminal === 'system') { + return process.env.COMSPEC || 'cmd.exe'; + } + + // Check if we have paths defined for this terminal type + const paths = WINDOWS_SHELL_PATHS[preferredTerminal]; + if (paths) { + // Find the first existing shell + for (const shellPath of paths) { + if (existsSync(shellPath)) { + return shellPath; + } + } + } + + // Fallback to COMSPEC for unrecognized terminals + return process.env.COMSPEC || 'cmd.exe'; +} /** * Spawn a new PTY process with appropriate shell and environment @@ -18,13 +74,25 @@ export function spawnPtyProcess( rows: number, profileEnv?: Record ): pty.IPty { + // Read user's preferred terminal setting + const settings = readSettingsFile(); + const preferredTerminal = settings?.preferredTerminal as SupportedTerminal | undefined; + const shell = process.platform === 'win32' - ? process.env.COMSPEC || 'cmd.exe' + ? getWindowsShell(preferredTerminal) : process.env.SHELL || '/bin/zsh'; const shellArgs = process.platform === 'win32' ? [] : ['-l']; - console.warn('[PtyManager] Spawning shell:', shell, shellArgs); + console.warn('[PtyManager] Spawning shell:', shell, shellArgs, '(preferred:', preferredTerminal || 'system', ')'); + + // Create a clean environment without DEBUG to prevent Claude Code from + // enabling debug mode when the Electron app is run in development mode. 
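Returning to the output-parser change above, a small sketch of how the busy/idle detector classifies chunks of terminal output (the sample strings are invented):

```ts
import { detectClaudeBusyState } from './output-parser';

console.log(detectClaudeBusyState('● Searching the codebase\nAnalyzing...')); // 'busy' (response bullet + spinner text)
console.log(detectClaudeBusyState('build finished\n> '));                     // 'idle' (bare "> " prompt at the end)
console.log(detectClaudeBusyState('ordinary shell output'));                  // null  (no state change detected)
```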
+ // Also remove ANTHROPIC_API_KEY to ensure Claude Code uses OAuth tokens + // (CLAUDE_CODE_OAUTH_TOKEN from profileEnv) instead of API keys that may + // be present in the shell environment. Without this, Claude Code would + // show "Claude API" instead of "Claude Max" when ANTHROPIC_API_KEY is set. + const { DEBUG: _DEBUG, ANTHROPIC_API_KEY: _ANTHROPIC_API_KEY, ...cleanEnv } = process.env; return pty.spawn(shell, shellArgs, { name: 'xterm-256color', @@ -32,7 +100,7 @@ rows, cwd: cwd || os.homedir(), env: { - ...process.env, + ...cleanEnv, ...profileEnv, TERM: 'xterm-256color', COLORTERM: 'truecolor', diff --git a/apps/frontend/src/main/terminal/session-handler.ts b/apps/frontend/src/main/terminal/session-handler.ts index 9ac08fe5a7..38edfa1e1a 100644 --- a/apps/frontend/src/main/terminal/session-handler.ts +++ b/apps/frontend/src/main/terminal/session-handler.ts @@ -11,6 +11,48 @@ import { getTerminalSessionStore, type TerminalSession } from '../terminal-sessi import { IPC_CHANNELS } from '../../shared/constants'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; +/** + * Track session IDs that have been claimed by terminals to prevent race conditions. + * When multiple terminals invoke Claude simultaneously, this prevents them from + * all capturing the same session ID. + * + * Key: sessionId, Value: terminalId that claimed it + */ +const claimedSessionIds: Map<string, string> = new Map(); + +/** + * Claim a session ID for a terminal. Returns true if successful, false if already claimed. + */ +export function claimSessionId(sessionId: string, terminalId: string): boolean { + const existingClaim = claimedSessionIds.get(sessionId); + if (existingClaim && existingClaim !== terminalId) { + debugLog('[SessionHandler] Session ID already claimed:', sessionId, 'by terminal:', existingClaim); + return false; + } + claimedSessionIds.set(sessionId, terminalId); + debugLog('[SessionHandler] Claimed session ID:', sessionId, 'for terminal:', terminalId); + return true; +} + +/** + * Release a session ID claim when a terminal is destroyed or session changes. + */ +export function releaseSessionId(terminalId: string): void { + for (const [sessionId, claimedBy] of claimedSessionIds.entries()) { + if (claimedBy === terminalId) { + claimedSessionIds.delete(sessionId); + debugLog('[SessionHandler] Released session ID:', sessionId, 'from terminal:', terminalId); + } + } +} + +/** + * Get all currently claimed session IDs (for exclusion during search). + */ +export function getClaimedSessionIds(): Set<string> { + return new Set(claimedSessionIds.keys()); +} + /** * Get the Claude project slug from a project path. * Claude uses the full path with forward slashes replaced by dashes. @@ -56,9 +98,19 @@ export function findMostRecentClaudeSession(projectPath: string): string | null } /** - * Find a Claude session created/modified after a given timestamp + * Find a Claude session created/modified after a given timestamp. + * Excludes session IDs that have already been claimed by other terminals + * to prevent race conditions when multiple terminals invoke Claude simultaneously.
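The claim helpers above act as a simple first-come, first-served registry; a short sketch with made-up session and terminal IDs:

```ts
import { claimSessionId, releaseSessionId, getClaimedSessionIds } from './session-handler';

console.log(claimSessionId('abc-123', 'terminal-1')); // true  - first claim wins
console.log(claimSessionId('abc-123', 'terminal-2')); // false - already owned by terminal-1
console.log(getClaimedSessionIds().has('abc-123'));   // true  - excluded from later session searches
releaseSessionId('terminal-1');                       // e.g. when terminal-1 is destroyed
console.log(claimSessionId('abc-123', 'terminal-2')); // true  - claimable again after release
```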
+ * + * @param projectPath - The project path to search sessions for + * @param afterTimestamp - Only consider sessions modified after this timestamp + * @param excludeSessionIds - Optional set of session IDs to exclude (already claimed) */ -export function findClaudeSessionAfter(projectPath: string, afterTimestamp: number): string | null { +export function findClaudeSessionAfter( + projectPath: string, + afterTimestamp: number, + excludeSessionIds?: Set<string> +): string | null { const slug = getClaudeProjectSlug(projectPath); const claudeProjectDir = path.join(os.homedir(), '.claude', 'projects', slug); @@ -71,17 +123,22 @@ export function findClaudeSessionAfter(projectPath: string, afterTimestamp: numb .filter(f => f.endsWith('.jsonl')) .map(f => ({ name: f, + sessionId: f.replace('.jsonl', ''), path: path.join(claudeProjectDir, f), mtime: fs.statSync(path.join(claudeProjectDir, f)).mtime.getTime() })) .filter(f => f.mtime > afterTimestamp) + // Exclude already-claimed session IDs to prevent race conditions + .filter(f => !excludeSessionIds || !excludeSessionIds.has(f.sessionId)) .sort((a, b) => b.mtime - a.mtime); if (files.length === 0) { return null; } - return files[0].name.replace('.jsonl', ''); + const sessionId = files[0].sessionId; + debugLog('[SessionHandler] Found unclaimed session after timestamp:', sessionId, 'excluded:', excludeSessionIds?.size ?? 0); + return sessionId; } catch (error) { debugError('[SessionHandler] Error finding Claude session:', error); return null; @@ -106,7 +163,8 @@ export function persistSession(terminal: TerminalProcess): void { claudeSessionId: terminal.claudeSessionId, outputBuffer: terminal.outputBuffer, createdAt: new Date().toISOString(), - lastActiveAt: new Date().toISOString() + lastActiveAt: new Date().toISOString(), + worktreeConfig: terminal.worktreeConfig, }; store.saveSession(session); } @@ -183,7 +241,9 @@ export function getSessionsForDate(date: string, projectPath: string): TerminalS } /** - * Attempt to capture Claude session ID by polling the session directory + * Attempt to capture Claude session ID by polling the session directory. + * Uses the claim mechanism to prevent race conditions when multiple terminals + * invoke Claude simultaneously - each terminal will get a unique session ID.
*/ export function captureClaudeSessionId( terminalId: string, @@ -200,31 +260,44 @@ export function captureClaudeSessionId( const terminal = terminals.get(terminalId); if (!terminal || !terminal.isClaudeMode) { + debugLog('[SessionHandler] Terminal no longer in Claude mode, stopping session capture:', terminalId); return; } if (terminal.claudeSessionId) { + debugLog('[SessionHandler] Terminal already has session ID, stopping capture:', terminalId); return; } - const sessionId = findClaudeSessionAfter(projectPath, startTime); + // Get currently claimed session IDs to exclude from search + const claimedIds = getClaimedSessionIds(); + const sessionId = findClaudeSessionAfter(projectPath, startTime, claimedIds); if (sessionId) { - terminal.claudeSessionId = sessionId; - debugLog('[SessionHandler] Captured Claude session ID from directory:', sessionId); - - if (terminal.projectPath) { - updateClaudeSessionId(terminal.projectPath, terminalId, sessionId); - } - - const win = getWindow(); - if (win) { - win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_SESSION, terminalId, sessionId); + // Try to claim this session ID - if another terminal beat us to it, keep searching + if (claimSessionId(sessionId, terminalId)) { + terminal.claudeSessionId = sessionId; + debugLog('[SessionHandler] Captured and claimed Claude session ID:', sessionId, 'for terminal:', terminalId); + + if (terminal.projectPath) { + updateClaudeSessionId(terminal.projectPath, terminalId, sessionId); + } + + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_SESSION, terminalId, sessionId); + } + } else { + // Session was claimed by another terminal, keep polling for a different one + debugLog('[SessionHandler] Session ID was claimed by another terminal, continuing to poll:', sessionId); + if (attempts < maxAttempts) { + setTimeout(checkForSession, 1000); + } } } else if (attempts < maxAttempts) { setTimeout(checkForSession, 1000); } else { - debugLog('[SessionHandler] Could not capture Claude session ID after', maxAttempts, 'attempts'); + debugLog('[SessionHandler] Could not capture Claude session ID after', maxAttempts, 'attempts for terminal:', terminalId); } }; diff --git a/apps/frontend/src/main/terminal/terminal-event-handler.ts b/apps/frontend/src/main/terminal/terminal-event-handler.ts index 79a5b07387..7f8b061dfc 100644 --- a/apps/frontend/src/main/terminal/terminal-event-handler.ts +++ b/apps/frontend/src/main/terminal/terminal-event-handler.ts @@ -6,6 +6,7 @@ import * as OutputParser from './output-parser'; import * as ClaudeIntegration from './claude-integration-handler'; import type { TerminalProcess, WindowGetter } from './types'; +import { IPC_CHANNELS } from '../../shared/constants'; /** * Event handler callbacks @@ -14,8 +15,12 @@ export interface EventHandlerCallbacks { onClaudeSessionId: (terminal: TerminalProcess, sessionId: string) => void; onRateLimit: (terminal: TerminalProcess, data: string) => void; onOAuthToken: (terminal: TerminalProcess, data: string) => void; + onClaudeBusyChange: (terminal: TerminalProcess, isBusy: boolean) => void; } +// Track the last known busy state per terminal to avoid duplicate events +const lastBusyState = new Map(); + /** * Handle terminal data output */ @@ -39,6 +44,28 @@ export function handleTerminalData( // Check for OAuth token callbacks.onOAuthToken(terminal, data); + + // Detect Claude busy state changes (only when in Claude mode) + if (terminal.isClaudeMode) { + const busyState = OutputParser.detectClaudeBusyState(data); + if 
(busyState !== null) { + const isBusy = busyState === 'busy'; + const lastState = lastBusyState.get(terminal.id); + + // Only emit if state actually changed + if (lastState !== isBusy) { + lastBusyState.set(terminal.id, isBusy); + callbacks.onClaudeBusyChange(terminal, isBusy); + } + } + } +} + +/** + * Clear busy state tracking for a terminal (call on terminal destruction) + */ +export function clearBusyState(terminalId: string): void { + lastBusyState.delete(terminalId); } /** @@ -64,6 +91,12 @@ export function createEventCallbacks( }, onOAuthToken: (terminal, data) => { ClaudeIntegration.handleOAuthToken(terminal, data, getWindow); + }, + onClaudeBusyChange: (terminal, isBusy) => { + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_CLAUDE_BUSY, terminal.id, isBusy); + } } }; } diff --git a/apps/frontend/src/main/terminal/terminal-lifecycle.ts b/apps/frontend/src/main/terminal/terminal-lifecycle.ts index d0ee85fbf3..22d7eaecee 100644 --- a/apps/frontend/src/main/terminal/terminal-lifecycle.ts +++ b/apps/frontend/src/main/terminal/terminal-lifecycle.ts @@ -4,6 +4,7 @@ */ import * as os from 'os'; +import { existsSync } from 'fs'; import type { TerminalCreateOptions } from '../../shared/types'; import { IPC_CHANNELS } from '../../shared/constants'; import type { TerminalSession } from '../terminal-session-store'; @@ -22,6 +23,9 @@ import { debugLog, debugError } from '../../shared/utils/debug-logger'; export interface RestoreOptions { resumeClaudeSession: boolean; captureSessionId: (terminalId: string, projectPath: string, startTime: number) => void; + /** Callback triggered when a Claude session needs to be resumed. + * Note: sessionId is deprecated and ignored - resumeClaude uses --continue */ + onResumeNeeded?: (terminalId: string, sessionId: string | undefined) => void; } /** @@ -54,8 +58,16 @@ export async function createTerminal( debugLog('[TerminalLifecycle] Injecting OAuth token from active profile'); } + // Validate cwd exists - if the directory doesn't exist (e.g., worktree removed), + // fall back to project path to prevent shell exit with code 1 + let effectiveCwd = cwd; + if (cwd && !existsSync(cwd)) { + debugLog('[TerminalLifecycle] Terminal cwd does not exist, falling back:', cwd, '->', projectPath || os.homedir()); + effectiveCwd = projectPath || os.homedir(); + } + const ptyProcess = PtyManager.spawnPtyProcess( - cwd || os.homedir(), + effectiveCwd || os.homedir(), cols, rows, profileEnv @@ -63,7 +75,7 @@ export async function createTerminal( debugLog('[TerminalLifecycle] PTY process spawned, pid:', ptyProcess.pid); - const terminalCwd = cwd || os.homedir(); + const terminalCwd = effectiveCwd || os.homedir(); const terminal: TerminalProcess = { id, pty: ptyProcess, @@ -111,12 +123,31 @@ export async function restoreTerminal( cols = 80, rows = 24 ): Promise { - debugLog('[TerminalLifecycle] Restoring terminal session:', session.id, 'Claude mode:', session.isClaudeMode); + // Look up the stored session to get the correct isClaudeMode value + // The renderer may pass isClaudeMode: false (by design), but we need the stored value + // to determine whether to auto-resume Claude + const storedSessions = SessionHandler.getSavedSessions(session.projectPath); + const storedSession = storedSessions.find(s => s.id === session.id); + const storedIsClaudeMode = storedSession?.isClaudeMode ?? session.isClaudeMode; + const storedClaudeSessionId = storedSession?.claudeSessionId ?? 
session.claudeSessionId; + + debugLog('[TerminalLifecycle] Restoring terminal session:', session.id, + 'Passed Claude mode:', session.isClaudeMode, + 'Stored Claude mode:', storedIsClaudeMode, + 'Stored session ID:', storedClaudeSessionId); + + // Validate cwd exists - if the directory was deleted (e.g., worktree removed), + // fall back to project path to prevent shell exit with code 1 + let effectiveCwd = session.cwd; + if (!existsSync(session.cwd)) { + debugLog('[TerminalLifecycle] Session cwd does not exist, falling back to project path:', session.cwd, '->', session.projectPath); + effectiveCwd = session.projectPath || os.homedir(); + } const result = await createTerminal( { id: session.id, - cwd: session.cwd, + cwd: effectiveCwd, cols, rows, projectPath: session.projectPath @@ -135,19 +166,55 @@ export async function restoreTerminal( return { success: false, error: 'Terminal not found after creation' }; } + // Restore title and worktree config from session terminal.title = session.title; + // Only restore worktree config if the worktree directory still exists + // (effectiveCwd matching session.cwd means no fallback was needed) + if (effectiveCwd === session.cwd) { + terminal.worktreeConfig = session.worktreeConfig; + } else { + // Worktree was deleted, clear the config and update terminal's cwd + terminal.worktreeConfig = undefined; + terminal.cwd = effectiveCwd; + debugLog('[TerminalLifecycle] Cleared worktree config for terminal with deleted worktree:', session.id); + } - // Restore Claude mode state without sending resume commands - // The PTY daemon keeps processes alive, so we just need to reconnect to the existing session - if (session.isClaudeMode) { - terminal.isClaudeMode = true; - terminal.claudeSessionId = session.claudeSessionId; + // Re-persist after restoring title and worktreeConfig + // (createTerminal persists before these are set, so we need to persist again) + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } - debugLog('[TerminalLifecycle] Restored Claude mode state for session:', session.id, 'sessionId:', session.claudeSessionId); + // Send title change event for all restored terminals so renderer updates + const win = getWindow(); + if (win) { + win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, session.id, session.title); + } + + // Defer Claude resume until terminal becomes active (is viewed by user) + // This prevents all terminals from resuming Claude simultaneously on app startup, + // which can cause crashes and resource contention. 
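A sketch of how the deferred resume might be driven from the main-process side when a tab becomes visible; the tab-activation hook is an assumption, while pendingClaudeResume and activateDeferredResume() come from this change:

```ts
import type { TerminalManager } from './terminal-manager';

// Called whenever a restored terminal tab becomes visible in the UI.
// activateDeferredResume() returns early unless the terminal was restored with
// pendingClaudeResume set, so calling it on every tab switch is safe.
export async function onTerminalTabActivated(manager: TerminalManager, terminalId: string): Promise<void> {
  await manager.activateDeferredResume(terminalId);
}
```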
+ // + // Use storedIsClaudeMode which comes from the persisted store, + // not the renderer-passed values (renderer always passes isClaudeMode: false) + if (options.resumeClaudeSession && storedIsClaudeMode) { + // Set Claude mode so it persists correctly across app restarts + // Without this, storedIsClaudeMode would be false on next restore + terminal.isClaudeMode = true; + // Mark terminal as having a pending Claude resume + // The actual resume will be triggered when the terminal becomes active + terminal.pendingClaudeResume = true; + debugLog('[TerminalLifecycle] Marking terminal for deferred Claude resume:', terminal.id); - const win = getWindow(); + // Notify renderer that this terminal has a pending Claude resume + // The renderer will trigger the resume when the terminal tab becomes active if (win) { - win.webContents.send(IPC_CHANNELS.TERMINAL_TITLE_CHANGE, session.id, session.title); + win.webContents.send(IPC_CHANNELS.TERMINAL_PENDING_RESUME, terminal.id, storedClaudeSessionId); + } + + // Persist the Claude mode and pending resume state + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); } } @@ -172,6 +239,8 @@ export async function destroyTerminal( try { SessionHandler.removePersistedSession(terminal); + // Release any claimed session ID for this terminal + SessionHandler.releaseSessionId(id); onCleanup(id); PtyManager.killPty(terminal); terminals.delete(id); diff --git a/apps/frontend/src/main/terminal/terminal-manager.ts b/apps/frontend/src/main/terminal/terminal-manager.ts index f2ab44a7e2..52b83a01f0 100644 --- a/apps/frontend/src/main/terminal/terminal-manager.ts +++ b/apps/frontend/src/main/terminal/terminal-manager.ts @@ -80,6 +80,12 @@ export class TerminalManager { this.terminals, this.getWindow ); + }, + onResumeNeeded: (terminalId, sessionId) => { + // Use async version to avoid blocking main process + this.resumeClaudeAsync(terminalId, sessionId).catch((error) => { + console.error('[terminal-manager] Failed to resume Claude session:', error); + }); } }, cols, @@ -130,8 +136,35 @@ export class TerminalManager { } } + /** + * Invoke Claude in a terminal with optional profile override (async - non-blocking) + */ + async invokeClaudeAsync(id: string, cwd?: string, profileId?: string): Promise { + const terminal = this.terminals.get(id); + if (!terminal) { + return; + } + + await ClaudeIntegration.invokeClaudeAsync( + terminal, + cwd, + profileId, + this.getWindow, + (terminalId, projectPath, startTime) => { + SessionHandler.captureClaudeSessionId( + terminalId, + projectPath, + startTime, + this.terminals, + this.getWindow + ); + } + ); + } + /** * Invoke Claude in a terminal with optional profile override + * @deprecated Use invokeClaudeAsync for non-blocking behavior */ invokeClaude(id: string, cwd?: string, profileId?: string): void { const terminal = this.terminals.get(id); @@ -169,13 +202,48 @@ export class TerminalManager { terminal, profileId, this.getWindow, - (terminalId, cwd, profileId) => this.invokeClaude(terminalId, cwd, profileId), + async (terminalId, cwd, profileId) => this.invokeClaudeAsync(terminalId, cwd, profileId), (terminalId) => this.lastNotifiedRateLimitReset.delete(terminalId) ); } + /** + * Resume Claude in a terminal asynchronously (non-blocking) + */ + async resumeClaudeAsync(id: string, sessionId?: string): Promise { + const terminal = this.terminals.get(id); + if (!terminal) { + return; + } + + await ClaudeIntegration.resumeClaudeAsync(terminal, sessionId, this.getWindow); + } + + /** + * Activate deferred Claude 
resume for a terminal + * Called when a terminal with pendingClaudeResume becomes active (user views it) + */ + async activateDeferredResume(id: string): Promise { + const terminal = this.terminals.get(id); + if (!terminal) { + return; + } + + // Check if terminal has a pending resume + if (!terminal.pendingClaudeResume) { + return; + } + + // Clear the pending flag + terminal.pendingClaudeResume = false; + + // Now actually resume Claude + await ClaudeIntegration.resumeClaudeAsync(terminal, undefined, this.getWindow); + } + /** * Resume Claude in a terminal with a specific session ID + * @deprecated Use resumeClaudeAsync for non-blocking behavior */ resumeClaude(id: string, sessionId?: string): void { const terminal = this.terminals.get(id); @@ -239,6 +307,12 @@ export class TerminalManager { this.terminals, this.getWindow ); + }, + onResumeNeeded: (terminalId, sessionId) => { + // Use async version to avoid blocking main process + this.resumeClaudeAsync(terminalId, sessionId).catch((error) => { + console.error('[terminal-manager] Failed to resume Claude session:', error); + }); } }, cols, @@ -279,6 +353,20 @@ export class TerminalManager { } } + /** + * Update terminal worktree config + */ + setWorktreeConfig(id: string, config: import('../../shared/types').TerminalWorktreeConfig | undefined): void { + const terminal = this.terminals.get(id); + if (terminal) { + terminal.worktreeConfig = config; + // Persist immediately when worktree config changes + if (terminal.projectPath) { + SessionHandler.persistSession(terminal); + } + } + } + /** * Check if a terminal's PTY process is alive */ diff --git a/apps/frontend/src/main/terminal/types.ts b/apps/frontend/src/main/terminal/types.ts index 7a3618909d..b8ef101230 100644 --- a/apps/frontend/src/main/terminal/types.ts +++ b/apps/frontend/src/main/terminal/types.ts @@ -1,5 +1,6 @@ import type * as pty from '@lydell/node-pty'; import type { BrowserWindow } from 'electron'; +import type { TerminalWorktreeConfig } from '../../shared/types'; /** * Terminal process tracking @@ -14,6 +15,10 @@ export interface TerminalProcess { claudeProfileId?: string; outputBuffer: string; title: string; + /** Associated worktree configuration (persisted across restarts) */ + worktreeConfig?: TerminalWorktreeConfig; + /** Whether this terminal has a pending Claude resume that should be triggered on activation */ + pendingClaudeResume?: boolean; } /** diff --git a/apps/frontend/src/main/updater/config.ts b/apps/frontend/src/main/updater/config.ts deleted file mode 100644 index 982042a66d..0000000000 --- a/apps/frontend/src/main/updater/config.ts +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Configuration for Auto Claude updater - */ - -/** - * GitHub repository configuration - */ -export const GITHUB_CONFIG = { - owner: 'AndyMik90', - repo: 'Auto-Claude', - autoBuildPath: 'apps/backend' // Path within repo where auto-claude backend lives -} as const; - -/** - * Files and directories to preserve during updates - */ -export const PRESERVE_FILES = ['.env', 'specs'] as const; - -/** - * Files and directories to skip when copying - */ -export const SKIP_FILES = ['__pycache__', '.DS_Store', '.git', 'specs', '.env'] as const; - -/** - * Update-related timeouts (in milliseconds) - */ -export const TIMEOUTS = { - requestTimeout: 10000, - downloadTimeout: 60000 -} as const; diff --git a/apps/frontend/src/main/updater/file-operations.ts b/apps/frontend/src/main/updater/file-operations.ts deleted file mode 100644 index b948631ddc..0000000000 --- 
a/apps/frontend/src/main/updater/file-operations.ts +++ /dev/null @@ -1,135 +0,0 @@ -/** - * File operation utilities for updates - */ - -import { existsSync, mkdirSync, readdirSync, statSync, copyFileSync, readFileSync, writeFileSync, rmSync } from 'fs'; -import path from 'path'; -import { exec } from 'child_process'; -import { promisify } from 'util'; -import { SKIP_FILES } from './config'; - -const execAsync = promisify(exec); - -/** - * Extract a .tar.gz file - * Uses system tar command on Unix or PowerShell on Windows - */ -export async function extractTarball(tarballPath: string, destPath: string): Promise { - try { - if (process.platform === 'win32') { - // On Windows, try multiple approaches: - // 1. Modern Windows 10/11 has built-in tar - // 2. Fall back to PowerShell's Expand-Archive for .zip (but .tar.gz needs tar) - // 3. Use PowerShell to extract via .NET - try { - // First try native tar (available on Windows 10 1803+) - await execAsync(`tar -xzf "${tarballPath}" -C "${destPath}"`); - } catch { - // Fall back to PowerShell with .NET for gzip decompression - // This is more complex but works on older Windows versions - const psScript = ` - $tarball = "${tarballPath.replace(/\\/g, '\\\\')}" - $dest = "${destPath.replace(/\\/g, '\\\\')}" - $tempTar = Join-Path $env:TEMP "auto-claude-update.tar" - - # Decompress gzip - $gzipStream = [System.IO.File]::OpenRead($tarball) - $decompressedStream = New-Object System.IO.Compression.GZipStream($gzipStream, [System.IO.Compression.CompressionMode]::Decompress) - $tarStream = [System.IO.File]::Create($tempTar) - $decompressedStream.CopyTo($tarStream) - $tarStream.Close() - $decompressedStream.Close() - $gzipStream.Close() - - # Extract tar using tar command (should work even if gzip didn't) - tar -xf $tempTar -C $dest - Remove-Item $tempTar -Force - `; - await execAsync(`powershell -NoProfile -Command "${psScript.replace(/"/g, '\\"').replace(/\n/g, ' ')}"`); - } - } else { - // Unix systems - use native tar - await execAsync(`tar -xzf "${tarballPath}" -C "${destPath}"`); - } - } catch (error) { - throw new Error(`Failed to extract tarball: ${error instanceof Error ? 
error.message : 'Unknown error'}`); - } -} - -/** - * Recursively copy directory - */ -export function copyDirectoryRecursive( - src: string, - dest: string, - preserveExisting: boolean = false -): void { - if (!existsSync(dest)) { - mkdirSync(dest, { recursive: true }); - } - - const entries = readdirSync(src, { withFileTypes: true }); - - for (const entry of entries) { - const srcPath = path.join(src, entry.name); - const destPath = path.join(dest, entry.name); - - // Skip certain files/directories - if (SKIP_FILES.includes(entry.name as (typeof SKIP_FILES)[number])) { - continue; - } - - // In preserve mode, skip existing files - if (preserveExisting && existsSync(destPath)) { - if (entry.isDirectory()) { - copyDirectoryRecursive(srcPath, destPath, preserveExisting); - } - continue; - } - - if (entry.isDirectory()) { - copyDirectoryRecursive(srcPath, destPath, preserveExisting); - } else { - copyFileSync(srcPath, destPath); - } - } -} - -/** - * Preserve specified files before update - */ -export function preserveFiles(targetPath: string, filesToPreserve: readonly string[]): Record { - const preservedContent: Record = {}; - - for (const file of filesToPreserve) { - const filePath = path.join(targetPath, file); - if (existsSync(filePath)) { - if (!statSync(filePath).isDirectory()) { - preservedContent[file] = readFileSync(filePath); - } - } - } - - return preservedContent; -} - -/** - * Restore preserved files after update - */ -export function restoreFiles(targetPath: string, preservedContent: Record): void { - for (const [file, content] of Object.entries(preservedContent)) { - writeFileSync(path.join(targetPath, file), content); - } -} - -/** - * Clean target directory while preserving specified files - */ -export function cleanTargetDirectory(targetPath: string, preserveFiles: readonly string[]): void { - const items = readdirSync(targetPath); - for (const item of items) { - if (!preserveFiles.includes(item)) { - rmSync(path.join(targetPath, item), { recursive: true, force: true }); - } - } -} diff --git a/apps/frontend/src/main/updater/http-client.ts b/apps/frontend/src/main/updater/http-client.ts deleted file mode 100644 index ada5f5d41a..0000000000 --- a/apps/frontend/src/main/updater/http-client.ts +++ /dev/null @@ -1,189 +0,0 @@ -/** - * HTTP client utilities for fetching updates - */ - -import https from 'https'; -import { createWriteStream } from 'fs'; -import { TIMEOUTS, GITHUB_CONFIG } from './config'; - -/** - * Fetch JSON from a URL using https - */ -export function fetchJson(url: string): Promise { - return new Promise((resolve, reject) => { - const headers = { - 'User-Agent': 'Auto-Claude-UI', - 'Accept': 'application/vnd.github+json' - }; - - const request = https.get(url, { headers }, (response) => { - // Handle redirects - if (response.statusCode === 301 || response.statusCode === 302) { - const redirectUrl = response.headers.location; - if (redirectUrl) { - fetchJson(redirectUrl).then(resolve).catch(reject); - return; - } - } - - // Handle HTTP 300 Multiple Choices (branch/tag name collision) - if (response.statusCode === 300) { - let data = ''; - response.on('data', chunk => data += chunk); - response.on('end', () => { - console.error('[HTTP] Multiple choices for resource:', { - url, - statusCode: 300, - response: data - }); - reject(new Error( - `Multiple resources found for ${url}. ` + - `This usually means a branch and tag have the same name. 
` + - `Please report this issue at https://github.com/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/issues` - )); - }); - response.on('error', reject); - return; - } - - if (response.statusCode !== 200) { - // Collect response body for error details (limit to 10KB) - const maxErrorSize = 10 * 1024; - let errorData = ''; - response.on('data', chunk => { - if (errorData.length < maxErrorSize) { - errorData += chunk.toString().slice(0, maxErrorSize - errorData.length); - } - }); - response.on('end', () => { - const errorMsg = `HTTP ${response.statusCode}: ${errorData || response.statusMessage || 'No error details'}`; - reject(new Error(errorMsg)); - }); - response.on('error', reject); - return; - } - - let data = ''; - response.on('data', chunk => data += chunk); - response.on('end', () => { - try { - resolve(JSON.parse(data) as T); - } catch (_e) { - reject(new Error('Failed to parse JSON response')); - } - }); - response.on('error', reject); - }); - - request.on('error', reject); - request.setTimeout(TIMEOUTS.requestTimeout, () => { - request.destroy(); - reject(new Error('Request timeout')); - }); - }); -} - -/** - * Download a file with progress tracking - */ -export function downloadFile( - url: string, - destPath: string, - onProgress?: (percent: number) => void -): Promise { - return new Promise((resolve, reject) => { - const file = createWriteStream(destPath); - - // GitHub API URLs need the GitHub Accept header to get a redirect to the actual file - // Non-API URLs (CDN, direct downloads) use octet-stream - const isGitHubApi = url.includes('api.github.com'); - const headers = { - 'User-Agent': 'Auto-Claude-UI', - 'Accept': isGitHubApi ? 'application/vnd.github+json' : 'application/octet-stream' - }; - - const request = https.get(url, { headers }, (response) => { - // Handle redirects - if (response.statusCode === 301 || response.statusCode === 302) { - file.close(); - const redirectUrl = response.headers.location; - if (redirectUrl) { - downloadFile(redirectUrl, destPath, onProgress).then(resolve).catch(reject); - return; - } - } - - // Handle HTTP 300 Multiple Choices (branch/tag name collision) - if (response.statusCode === 300) { - file.close(); - let data = ''; - response.on('data', chunk => data += chunk); - response.on('end', () => { - console.error('[HTTP] Multiple choices for resource:', { - url, - statusCode: 300, - response: data - }); - reject(new Error( - `Multiple resources found for ${url}. ` + - `This usually means a branch and tag have the same name. 
` + - `Please download the latest version manually from: ` + - `https://github.com/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest` - )); - }); - response.on('error', reject); - return; - } - - if (response.statusCode !== 200) { - file.close(); - // Collect response body for error details (limit to 10KB) - const maxErrorSize = 10 * 1024; - let errorData = ''; - response.on('data', chunk => { - if (errorData.length < maxErrorSize) { - errorData += chunk.toString().slice(0, maxErrorSize - errorData.length); - } - }); - response.on('end', () => { - const errorMsg = `HTTP ${response.statusCode}: ${errorData || response.statusMessage || 'No error details'}`; - reject(new Error(errorMsg)); - }); - response.on('error', reject); - return; - } - - const totalSize = parseInt(response.headers['content-length'] || '0', 10); - let downloadedSize = 0; - - response.on('data', (chunk) => { - downloadedSize += chunk.length; - if (totalSize > 0 && onProgress) { - onProgress(Math.round((downloadedSize / totalSize) * 100)); - } - }); - - response.pipe(file); - - file.on('finish', () => { - file.close(); - resolve(); - }); - - file.on('error', (err) => { - file.close(); - reject(err); - }); - }); - - request.on('error', (err) => { - file.close(); - reject(err); - }); - - request.setTimeout(TIMEOUTS.downloadTimeout, () => { - request.destroy(); - reject(new Error('Download timeout')); - }); - }); -} diff --git a/apps/frontend/src/main/updater/types.ts b/apps/frontend/src/main/updater/types.ts deleted file mode 100644 index d1e0b2c5f7..0000000000 --- a/apps/frontend/src/main/updater/types.ts +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Type definitions for Auto Claude updater system - */ - -/** - * GitHub Release API response (partial) - */ -export interface GitHubRelease { - tag_name: string; - name: string; - body: string; - html_url: string; - tarball_url: string; - published_at: string; - prerelease: boolean; - draft: boolean; -} - -/** - * Result of checking for updates - */ -export interface AutoBuildUpdateCheck { - updateAvailable: boolean; - currentVersion: string; - latestVersion?: string; - releaseNotes?: string; - releaseUrl?: string; - error?: string; -} - -/** - * Result of applying an update - */ -export interface AutoBuildUpdateResult { - success: boolean; - version?: string; - error?: string; -} - -/** - * Update progress stages - */ -export type UpdateStage = 'checking' | 'downloading' | 'extracting' | 'complete' | 'error'; - -/** - * Progress callback for download - */ -export type UpdateProgressCallback = (progress: { - stage: UpdateStage; - percent?: number; - message: string; -}) => void; - -/** - * Update metadata stored after successful update - */ -export interface UpdateMetadata { - version: string; - updatedAt: string; - source: string; - releaseTag: string; - releaseName: string; -} diff --git a/apps/frontend/src/main/updater/update-checker.ts b/apps/frontend/src/main/updater/update-checker.ts deleted file mode 100644 index 2f04d93348..0000000000 --- a/apps/frontend/src/main/updater/update-checker.ts +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Update checking functionality - */ - -import { GITHUB_CONFIG } from './config'; -import { fetchJson } from './http-client'; -import { getEffectiveVersion, parseVersionFromTag, compareVersions } from './version-manager'; -import { GitHubRelease, AutoBuildUpdateCheck } from './types'; -import { debugLog } from '../../shared/utils/debug-logger'; - -// Cache for the latest release info (used by download) -let cachedLatestRelease: GitHubRelease | 
null = null; - -/** - * Get cached release (if available) - */ -export function getCachedRelease(): GitHubRelease | null { - return cachedLatestRelease; -} - -/** - * Set cached release - */ -export function setCachedRelease(release: GitHubRelease | null): void { - cachedLatestRelease = release; -} - -/** - * Clear cached release - */ -export function clearCachedRelease(): void { - cachedLatestRelease = null; -} - -/** - * Check GitHub Releases for the latest version - */ -export async function checkForUpdates(): Promise { - // Use effective version which accounts for source updates - const currentVersion = getEffectiveVersion(); - debugLog('[UpdateCheck] Current effective version:', currentVersion); - - try { - // Fetch latest release from GitHub Releases API - const releaseUrl = `https://api.github.com/repos/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest`; - const release = await fetchJson(releaseUrl); - - // Cache for download function - setCachedRelease(release); - - // Parse version from tag (e.g., "v1.2.0" -> "1.2.0") - const latestVersion = parseVersionFromTag(release.tag_name); - debugLog('[UpdateCheck] Latest version:', latestVersion); - - // Compare versions - const updateAvailable = compareVersions(latestVersion, currentVersion) > 0; - debugLog('[UpdateCheck] Update available:', updateAvailable); - - return { - updateAvailable, - currentVersion, - latestVersion, - releaseNotes: release.body || undefined, - releaseUrl: release.html_url || undefined - }; - } catch (error) { - // Clear cache on error - clearCachedRelease(); - debugLog('[UpdateCheck] Error:', error instanceof Error ? error.message : error); - - return { - updateAvailable: false, - currentVersion, - error: error instanceof Error ? error.message : 'Failed to check for updates' - }; - } -} diff --git a/apps/frontend/src/main/updater/update-installer.ts b/apps/frontend/src/main/updater/update-installer.ts deleted file mode 100644 index a4e2d350db..0000000000 --- a/apps/frontend/src/main/updater/update-installer.ts +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Update installation and application - */ - -import { existsSync, mkdirSync, writeFileSync, rmSync, readdirSync } from 'fs'; -import path from 'path'; -import { app } from 'electron'; -import { GITHUB_CONFIG, PRESERVE_FILES } from './config'; -import { downloadFile, fetchJson } from './http-client'; -import { parseVersionFromTag } from './version-manager'; -import { getUpdateCachePath, getUpdateTargetPath } from './path-resolver'; -import { extractTarball, copyDirectoryRecursive, preserveFiles, restoreFiles, cleanTargetDirectory } from './file-operations'; -import { getCachedRelease, setCachedRelease, clearCachedRelease } from './update-checker'; -import { GitHubRelease, AutoBuildUpdateResult, UpdateProgressCallback, UpdateMetadata } from './types'; -import { debugLog } from '../../shared/utils/debug-logger'; - -/** - * Download and apply the latest auto-claude update from GitHub Releases - * - * Note: In production, this updates the bundled source in userData. - * For packaged apps, we can't modify resourcesPath directly, - * so we use a "source override" system. - */ -export async function downloadAndApplyUpdate( - onProgress?: UpdateProgressCallback -): Promise { - const cachePath = getUpdateCachePath(); - - debugLog('[Update] Starting update process...'); - debugLog('[Update] Cache path:', cachePath); - - try { - onProgress?.({ - stage: 'checking', - message: 'Fetching release info...' 
- }); - - // Ensure cache directory exists - if (!existsSync(cachePath)) { - mkdirSync(cachePath, { recursive: true }); - debugLog('[Update] Created cache directory'); - } - - // Get release info (use cache or fetch fresh) - let release = getCachedRelease(); - if (!release) { - const releaseUrl = `https://api.github.com/repos/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest`; - debugLog('[Update] Fetching release info from:', releaseUrl); - release = await fetchJson(releaseUrl); - setCachedRelease(release); - } else { - debugLog('[Update] Using cached release info'); - } - - // Use explicit tag reference URL to avoid HTTP 300 when branch/tag names collide - // See: https://github.com/AndyMik90/Auto-Claude/issues/78 - const tarballUrl = `https://api.github.com/repos/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/tarball/refs/tags/${release.tag_name}`; - const releaseVersion = parseVersionFromTag(release.tag_name); - debugLog('[Update] Release version:', releaseVersion); - debugLog('[Update] Tarball URL:', tarballUrl); - - const tarballPath = path.join(cachePath, 'auto-claude-update.tar.gz'); - const extractPath = path.join(cachePath, 'extracted'); - - // Clean up previous extraction - if (existsSync(extractPath)) { - rmSync(extractPath, { recursive: true, force: true }); - } - mkdirSync(extractPath, { recursive: true }); - - onProgress?.({ - stage: 'downloading', - percent: 0, - message: 'Downloading update...' - }); - - debugLog('[Update] Starting download to:', tarballPath); - - // Download the tarball - await downloadFile(tarballUrl, tarballPath, (percent) => { - onProgress?.({ - stage: 'downloading', - percent, - message: `Downloading... ${percent}%` - }); - }); - - debugLog('[Update] Download complete'); - - onProgress?.({ - stage: 'extracting', - message: 'Extracting update...' 
- }); - - debugLog('[Update] Extracting to:', extractPath); - - // Extract the tarball - await extractTarball(tarballPath, extractPath); - - debugLog('[Update] Extraction complete'); - - // Find the auto-claude folder in extracted content - // GitHub tarballs have a root folder like "owner-repo-hash/" - const extractedDirs = readdirSync(extractPath); - if (extractedDirs.length === 0) { - throw new Error('Empty tarball'); - } - - const rootDir = path.join(extractPath, extractedDirs[0]); - const autoBuildSource = path.join(rootDir, GITHUB_CONFIG.autoBuildPath); - - if (!existsSync(autoBuildSource)) { - throw new Error('auto-claude folder not found in download'); - } - - // Determine where to install the update - const targetPath = getUpdateTargetPath(); - debugLog('[Update] Target install path:', targetPath); - - // Backup existing source (if in dev mode) - const backupPath = path.join(cachePath, 'backup'); - if (!app.isPackaged && existsSync(targetPath)) { - if (existsSync(backupPath)) { - rmSync(backupPath, { recursive: true, force: true }); - } - // Simple copy for backup - debugLog('[Update] Creating backup at:', backupPath); - copyDirectoryRecursive(targetPath, backupPath); - } - - // Apply the update - debugLog('[Update] Applying update...'); - await applyUpdate(targetPath, autoBuildSource); - debugLog('[Update] Update applied successfully'); - - // Write update metadata - const metadata: UpdateMetadata = { - version: releaseVersion, - updatedAt: new Date().toISOString(), - source: 'github-release', - releaseTag: release.tag_name, - releaseName: release.name - }; - writeUpdateMetadata(targetPath, metadata); - - // Clear the cache after successful update - clearCachedRelease(); - - // Cleanup - rmSync(tarballPath, { force: true }); - rmSync(extractPath, { recursive: true, force: true }); - - onProgress?.({ - stage: 'complete', - message: `Updated to version ${releaseVersion}` - }); - - debugLog('[Update] ============================================'); - debugLog('[Update] UPDATE SUCCESSFUL'); - debugLog('[Update] New version:', releaseVersion); - debugLog('[Update] Target path:', targetPath); - debugLog('[Update] ============================================'); - - return { - success: true, - version: releaseVersion - }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : 'Update failed'; - debugLog('[Update] ============================================'); - debugLog('[Update] UPDATE FAILED'); - debugLog('[Update] Error:', errorMessage); - debugLog('[Update] ============================================'); - - // Provide user-friendly error message for HTTP 300 errors - let displayMessage = errorMessage; - if (errorMessage.includes('Multiple resources found')) { - displayMessage = - `Update failed due to repository configuration issue (HTTP 300). 
` + - `Please download the latest version manually from: ` + - `https://github.com/${GITHUB_CONFIG.owner}/${GITHUB_CONFIG.repo}/releases/latest`; - } - - onProgress?.({ - stage: 'error', - message: displayMessage - }); - - return { - success: false, - error: displayMessage - }; - } -} - -/** - * Apply update to target directory - */ -async function applyUpdate(targetPath: string, sourcePath: string): Promise { - if (existsSync(targetPath)) { - // Preserve important files - const preservedContent = preserveFiles(targetPath, PRESERVE_FILES); - - // Clean target but preserve certain files - cleanTargetDirectory(targetPath, PRESERVE_FILES); - - // Copy new files - copyDirectoryRecursive(sourcePath, targetPath, true); - - // Restore preserved files that might have been overwritten - restoreFiles(targetPath, preservedContent); - } else { - mkdirSync(targetPath, { recursive: true }); - copyDirectoryRecursive(sourcePath, targetPath, false); - } -} - -/** - * Write update metadata to disk - */ -function writeUpdateMetadata(targetPath: string, metadata: UpdateMetadata): void { - const metadataPath = path.join(targetPath, '.update-metadata.json'); - writeFileSync(metadataPath, JSON.stringify(metadata, null, 2)); -} diff --git a/apps/frontend/src/main/updater/update-status.ts b/apps/frontend/src/main/updater/update-status.ts deleted file mode 100644 index 93ec5e29c0..0000000000 --- a/apps/frontend/src/main/updater/update-status.ts +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Update status checking utilities - */ - -import { existsSync, readFileSync } from 'fs'; -import path from 'path'; -import { app } from 'electron'; -import { getBundledVersion, compareVersions } from './version-manager'; -import { UpdateMetadata } from './types'; - -/** - * Check if there's a pending source update that requires restart - */ -export function hasPendingSourceUpdate(): boolean { - if (!app.isPackaged) { - return false; - } - - const overridePath = path.join(app.getPath('userData'), 'auto-claude-source'); - const metadataPath = path.join(overridePath, '.update-metadata.json'); - - if (!existsSync(metadataPath)) { - return false; - } - - try { - const metadata = JSON.parse(readFileSync(metadataPath, 'utf-8')) as UpdateMetadata; - const bundledVersion = getBundledVersion(); - return compareVersions(metadata.version, bundledVersion) > 0; - } catch { - return false; - } -} - -/** - * Get update metadata if available - */ -export function getUpdateMetadata(): UpdateMetadata | null { - const overridePath = path.join(app.getPath('userData'), 'auto-claude-source'); - const metadataPath = path.join(overridePath, '.update-metadata.json'); - - if (!existsSync(metadataPath)) { - return null; - } - - try { - return JSON.parse(readFileSync(metadataPath, 'utf-8')) as UpdateMetadata; - } catch { - return null; - } -} diff --git a/apps/frontend/src/main/updater/version-manager.ts b/apps/frontend/src/main/updater/version-manager.ts index 92edcb8bd7..0924bd7e92 100644 --- a/apps/frontend/src/main/updater/version-manager.ts +++ b/apps/frontend/src/main/updater/version-manager.ts @@ -1,96 +1,22 @@ /** * Version management utilities + * + * Simplified version that uses only the bundled app version. + * The "source updater" system has been removed since the backend + * is bundled with the app and updates via electron-updater. 
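+ *
+ * Illustrative usage (hypothetical caller and version value):
+ *   const current = getBundledVersion(); // e.g. "2.7.3", read straight from package.json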
*/ import { app } from 'electron'; -import { existsSync, readFileSync } from 'fs'; -import path from 'path'; -import type { UpdateMetadata } from './types'; /** * Get the current app/framework version from package.json * - * Uses app.getVersion() (from package.json) as the base version. + * Uses app.getVersion() (from package.json) as the version. */ export function getBundledVersion(): string { return app.getVersion(); } -/** - * Get the effective version - accounts for source updates - * - * Returns the updated source version if an update has been applied, - * otherwise returns the bundled version. - */ -export function getEffectiveVersion(): string { - const isDebug = process.env.DEBUG === 'true'; - - // Build list of paths to check for update metadata - const metadataPaths: string[] = []; - - if (app.isPackaged) { - // Production: check userData override path - metadataPaths.push( - path.join(app.getPath('userData'), 'auto-claude-source', '.update-metadata.json') - ); - } else { - // Development: check the actual source paths where updates are written - const possibleSourcePaths = [ - // Apps structure: apps/backend - path.join(app.getAppPath(), '..', 'backend'), - path.join(process.cwd(), 'apps', 'backend'), - path.resolve(__dirname, '..', '..', '..', 'backend') - ]; - - for (const sourcePath of possibleSourcePaths) { - metadataPaths.push(path.join(sourcePath, '.update-metadata.json')); - } - } - - if (isDebug) { - console.log('[Version] Checking metadata paths:', metadataPaths); - } - - // Check each path for metadata - for (const metadataPath of metadataPaths) { - const exists = existsSync(metadataPath); - if (isDebug) { - console.log(`[Version] Checking ${metadataPath}: ${exists ? 'EXISTS' : 'not found'}`); - } - if (exists) { - try { - const metadata = JSON.parse(readFileSync(metadataPath, 'utf-8')) as UpdateMetadata; - if (metadata.version) { - if (isDebug) { - console.log(`[Version] Found metadata version: ${metadata.version}`); - } - return metadata.version; - } - } catch (e) { - if (isDebug) { - console.log(`[Version] Error reading metadata: ${e}`); - } - // Continue to next path - } - } - } - - const bundledVersion = app.getVersion(); - if (isDebug) { - console.log(`[Version] No metadata found, using bundled version: ${bundledVersion}`); - } - return bundledVersion; -} - -/** - * Parse version from GitHub release tag - * Handles tags like "v1.2.0", "1.2.0", "v1.2.0-beta" - */ -export function parseVersionFromTag(tag: string): string { - // Remove leading 'v' if present - return tag.replace(/^v/, ''); -} - /** * Parse a version string into its components * Handles versions like "2.7.2", "2.7.2-beta.6", "2.7.2-alpha.1" diff --git a/apps/frontend/src/main/utils/profile-manager.test.ts b/apps/frontend/src/main/utils/profile-manager.test.ts new file mode 100644 index 0000000000..a0e3aef370 --- /dev/null +++ b/apps/frontend/src/main/utils/profile-manager.test.ts @@ -0,0 +1,199 @@ +/** + * Tests for profile-manager.ts + * + * Red phase - write failing tests first + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { promises as fsPromises } from 'fs'; +import path from 'path'; +import { app } from 'electron'; +import { + loadProfilesFile, + saveProfilesFile, + generateProfileId, + validateFilePermissions +} from './profile-manager'; +import type { ProfilesFile } from '../../shared/types/profile'; + +// Mock Electron app.getPath +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') { + return 
'/mock/userdata'; + } + return '/mock/path'; + }) + } +})); + +// Mock fs module - mock the promises export which is used by profile-manager.ts +vi.mock('fs', () => { + const promises = { + readFile: vi.fn(), + writeFile: vi.fn(), + mkdir: vi.fn(), + chmod: vi.fn() + }; + return { + default: { promises }, // Default export contains promises + promises, // Named export for promises + existsSync: vi.fn(), + constants: { + O_RDONLY: 0, + S_IRUSR: 0o400 + } + }; +}); + +describe('profile-manager', () => { + const mockProfilesPath = '/mock/userdata/profiles.json'; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('loadProfilesFile', () => { + it('should return default profiles file when file does not exist', async () => { + vi.mocked(fsPromises.readFile).mockRejectedValue(new Error('ENOENT')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should return default profiles file when file is corrupted JSON', async () => { + vi.mocked(fsPromises.readFile).mockResolvedValue(Buffer.from('invalid json{')); + + const result = await loadProfilesFile(); + + expect(result).toEqual({ + profiles: [], + activeProfileId: null, + version: 1 + }); + }); + + it('should load valid profiles file', async () => { + const mockData: ProfilesFile = { + profiles: [ + { + id: 'test-id-1', + name: 'Test Profile', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-test-key', + createdAt: Date.now(), + updatedAt: Date.now() + } + ], + activeProfileId: 'test-id-1', + version: 1 + }; + + vi.mocked(fsPromises.readFile).mockResolvedValue( + Buffer.from(JSON.stringify(mockData)) + ); + + const result = await loadProfilesFile(); + + expect(result).toEqual(mockData); + }); + + it('should use auto-claude directory for profiles.json path', async () => { + vi.mocked(fsPromises.readFile).mockRejectedValue(new Error('ENOENT')); + + await loadProfilesFile(); + + // Verify the file path includes auto-claude + const readFileCalls = vi.mocked(fsPromises.readFile).mock.calls; + const filePath = readFileCalls[0]?.[0]; + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + }); + }); + + describe('saveProfilesFile', () => { + it('should write profiles file to disk', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + vi.mocked(fsPromises.writeFile).mockResolvedValue(undefined); + + await saveProfilesFile(mockData); + + expect(fsPromises.writeFile).toHaveBeenCalled(); + const writeFileCall = vi.mocked(fsPromises.writeFile).mock.calls[0]; + const filePath = writeFileCall?.[0]; + const content = writeFileCall?.[1]; + + expect(filePath).toContain('auto-claude'); + expect(filePath).toContain('profiles.json'); + expect(content).toBe(JSON.stringify(mockData, null, 2)); + }); + + it('should throw error when write fails', async () => { + const mockData: ProfilesFile = { + profiles: [], + activeProfileId: null, + version: 1 + }; + + vi.mocked(fsPromises.writeFile).mockRejectedValue(new Error('Write failed')); + + await expect(saveProfilesFile(mockData)).rejects.toThrow('Write failed'); + }); + }); + + describe('generateProfileId', () => { + it('should generate unique UUID v4 format IDs', () => { + const id1 = generateProfileId(); + const id2 = generateProfileId(); + + // UUID v4 format: xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx + 
expect(id1).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + expect(id2).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/); + + // IDs should be unique + expect(id1).not.toBe(id2); + }); + + it('should generate different IDs on consecutive calls', () => { + const ids = new Set(); + for (let i = 0; i < 100; i++) { + ids.add(generateProfileId()); + } + expect(ids.size).toBe(100); + }); + }); + + describe('validateFilePermissions', () => { + it('should validate user-readable only file permissions', async () => { + // Mock successful chmod + vi.mocked(fsPromises.chmod).mockResolvedValue(undefined); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(true); + }); + + it('should return false if chmod fails', async () => { + vi.mocked(fsPromises.chmod).mockRejectedValue(new Error('Permission denied')); + + const result = await validateFilePermissions('/mock/path/to/file.json'); + + expect(result).toBe(false); + }); + }); +}); diff --git a/apps/frontend/src/main/utils/profile-manager.ts b/apps/frontend/src/main/utils/profile-manager.ts new file mode 100644 index 0000000000..2d6deb8c59 --- /dev/null +++ b/apps/frontend/src/main/utils/profile-manager.ts @@ -0,0 +1,90 @@ +/** + * Profile Manager - File I/O for API profiles + * + * Handles loading and saving profiles.json from the auto-claude directory. + * Provides graceful handling for missing or corrupted files. + */ + +import { promises as fs } from 'fs'; +import path from 'path'; +import { app } from 'electron'; +import type { ProfilesFile } from '../../shared/types/profile'; + +/** + * Get the path to profiles.json in the auto-claude directory + */ +export function getProfilesFilePath(): string { + const userDataPath = app.getPath('userData'); + return path.join(userDataPath, 'auto-claude', 'profiles.json'); +} + +/** + * Load profiles.json from disk + * Returns default empty profiles file if file doesn't exist or is corrupted + */ +export async function loadProfilesFile(): Promise { + const filePath = getProfilesFilePath(); + + try { + const content = await fs.readFile(filePath, 'utf-8'); + const data = JSON.parse(content) as ProfilesFile; + return data; + } catch (error) { + // File doesn't exist or is corrupted - return default + return { + profiles: [], + activeProfileId: null, + version: 1 + }; + } +} + +/** + * Save profiles.json to disk + * Creates the auto-claude directory if it doesn't exist + */ +export async function saveProfilesFile(data: ProfilesFile): Promise { + const filePath = getProfilesFilePath(); + const dir = path.dirname(filePath); + + // Ensure directory exists + try { + await fs.mkdir(dir, { recursive: true }); + } catch (error) { + // Only ignore EEXIST errors (directory already exists) + // Rethrow other errors (e.g., permission issues) + if ((error as NodeJS.ErrnoException).code !== 'EEXIST') { + throw error; + } + } + + // Write file with formatted JSON + const content = JSON.stringify(data, null, 2); + await fs.writeFile(filePath, content, 'utf-8'); +} + +/** + * Generate a unique UUID v4 for a new profile + */ +export function generateProfileId(): string { + // Generate UUID v4 + return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = (Math.random() * 16) | 0; + const v = c === 'x' ? 
r : (r & 0x3) | 0x8; + return v.toString(16); + }); +} + +/** + * Validate and set file permissions to user-readable only + * Returns true if successful, false otherwise + */ +export async function validateFilePermissions(filePath: string): Promise { + try { + // Set file permissions to user-readable only (0600) + await fs.chmod(filePath, 0o600); + return true; + } catch { + return false; + } +} diff --git a/apps/frontend/src/main/utils/spec-number-lock.ts b/apps/frontend/src/main/utils/spec-number-lock.ts index d7a57bea10..b33fc455cc 100644 --- a/apps/frontend/src/main/utils/spec-number-lock.ts +++ b/apps/frontend/src/main/utils/spec-number-lock.ts @@ -154,7 +154,7 @@ export class SpecNumberLock { maxNumber = Math.max(maxNumber, this.scanSpecsDir(mainSpecsDir)); // 2. Scan all worktree specs - const worktreesDir = path.join(this.projectDir, '.worktrees'); + const worktreesDir = path.join(this.projectDir, '.auto-claude', 'worktrees', 'tasks'); if (existsSync(worktreesDir)) { try { const worktrees = readdirSync(worktreesDir, { withFileTypes: true }); diff --git a/apps/frontend/src/main/utils/windows-paths.ts b/apps/frontend/src/main/utils/windows-paths.ts new file mode 100644 index 0000000000..355640ac01 --- /dev/null +++ b/apps/frontend/src/main/utils/windows-paths.ts @@ -0,0 +1,283 @@ +/** + * Windows Executable Path Discovery Utility + * + * Provides reusable logic for finding Windows executables in common installation + * locations. Handles environment variable expansion and security validation. + * + * Used by cli-tool-manager.ts for Git, GitHub CLI, Claude CLI, etc. + * Follows the same pattern as homebrew-python.ts for platform-specific detection. + */ + +import { existsSync } from 'fs'; +import { access, constants } from 'fs/promises'; +import { execFileSync, execFile } from 'child_process'; +import { promisify } from 'util'; +import path from 'path'; +import os from 'os'; + +const execFileAsync = promisify(execFile); + +export interface WindowsToolPaths { + toolName: string; + executable: string; + patterns: string[]; +} + +export const WINDOWS_GIT_PATHS: WindowsToolPaths = { + toolName: 'Git', + executable: 'git.exe', + patterns: [ + '%PROGRAMFILES%\\Git\\cmd', + '%PROGRAMFILES(X86)%\\Git\\cmd', + '%LOCALAPPDATA%\\Programs\\Git\\cmd', + '%USERPROFILE%\\scoop\\apps\\git\\current\\cmd', + '%PROGRAMFILES%\\Git\\bin', + '%PROGRAMFILES(X86)%\\Git\\bin', + '%PROGRAMFILES%\\Git\\mingw64\\bin', + ], +}; + +function isSecurePath(pathStr: string): boolean { + const dangerousPatterns = [ + /[;&|`$(){}[\]<>!]/, // Shell metacharacters + /\.\.\//, // Unix directory traversal + /\.\.\\/, // Windows directory traversal + /[\r\n]/, // Newlines (command injection) + ]; + + for (const pattern of dangerousPatterns) { + if (pattern.test(pathStr)) { + return false; + } + } + + return true; +} + +export function expandWindowsPath(pathPattern: string): string | null { + const envVars: Record = { + '%PROGRAMFILES%': process.env.ProgramFiles || 'C:\\Program Files', + '%PROGRAMFILES(X86)%': process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', + '%LOCALAPPDATA%': process.env.LOCALAPPDATA, + '%APPDATA%': process.env.APPDATA, + '%USERPROFILE%': process.env.USERPROFILE || os.homedir(), + }; + + let expandedPath = pathPattern; + + for (const [placeholder, value] of Object.entries(envVars)) { + if (expandedPath.includes(placeholder)) { + if (!value) { + return null; + } + expandedPath = expandedPath.replace(placeholder, value); + } + } + + // Verify no unexpanded placeholders remain (indicates unknown 
variable) + if (/%[^%]+%/.test(expandedPath)) { + return null; + } + + // Normalize the path (resolve double backslashes, etc.) + return path.normalize(expandedPath); +} + +export function getWindowsExecutablePaths( + toolPaths: WindowsToolPaths, + logPrefix: string = '[Windows Paths]' +): string[] { + // Only run on Windows + if (process.platform !== 'win32') { + return []; + } + + const validPaths: string[] = []; + + for (const pattern of toolPaths.patterns) { + const expandedDir = expandWindowsPath(pattern); + + if (!expandedDir) { + console.warn(`${logPrefix} Could not expand path pattern: ${pattern}`); + continue; + } + + const fullPath = path.join(expandedDir, toolPaths.executable); + + // Security validation - reject potentially dangerous paths + if (!isSecurePath(fullPath)) { + console.warn(`${logPrefix} Path failed security validation: ${fullPath}`); + continue; + } + + if (existsSync(fullPath)) { + validPaths.push(fullPath); + } + } + + return validPaths; +} + +/** + * Find a Windows executable using the `where` command. + * This is the most reliable method as it searches: + * - All directories in PATH + * - App Paths registry entries + * - Current directory + * + * Works regardless of where the tool is installed (custom paths, different drives, etc.) + * + * @param executable - The executable name (e.g., 'git', 'gh', 'python') + * @param logPrefix - Prefix for console logging + * @returns The full path to the executable, or null if not found + */ +export function findWindowsExecutableViaWhere( + executable: string, + logPrefix: string = '[Windows Where]' +): string | null { + if (process.platform !== 'win32') { + return null; + } + + // Security: Only allow simple executable names (alphanumeric, dash, underscore, dot) + if (!/^[\w.-]+$/.test(executable)) { + console.warn(`${logPrefix} Invalid executable name: ${executable}`); + return null; + } + + try { + // Use 'where' command to find the executable + // where.exe is a built-in Windows command that finds executables + const result = execFileSync('where.exe', [executable], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + }).trim(); + + // 'where' returns multiple paths separated by newlines if found in multiple locations + // We take the first one (highest priority in PATH) + const paths = result.split(/\r?\n/).filter(p => p.trim()); + + if (paths.length > 0) { + const foundPath = paths[0].trim(); + + // Validate the path exists and is secure + if (existsSync(foundPath) && isSecurePath(foundPath)) { + console.log(`${logPrefix} Found via where: ${foundPath}`); + return foundPath; + } + } + + return null; + } catch { + // 'where' returns exit code 1 if not found, which throws an error + return null; + } +} + +/** + * Async version of getWindowsExecutablePaths. + * Use this in async contexts to avoid blocking the main process. 
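+ *
+ * Usage sketch (illustrative only; 'git' stands in for any supported tool):
+ *   const candidates = await getWindowsExecutablePathsAsync(WINDOWS_GIT_PATHS);
+ *   const gitExe = candidates[0] ?? await findWindowsExecutableViaWhereAsync('git');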
+ */ +export async function getWindowsExecutablePathsAsync( + toolPaths: WindowsToolPaths, + logPrefix: string = '[Windows Paths]' +): Promise { + // Only run on Windows + if (process.platform !== 'win32') { + return []; + } + + const validPaths: string[] = []; + + for (const pattern of toolPaths.patterns) { + const expandedDir = expandWindowsPath(pattern); + + if (!expandedDir) { + console.warn(`${logPrefix} Could not expand path pattern: ${pattern}`); + continue; + } + + const fullPath = path.join(expandedDir, toolPaths.executable); + + // Security validation - reject potentially dangerous paths + if (!isSecurePath(fullPath)) { + console.warn(`${logPrefix} Path failed security validation: ${fullPath}`); + continue; + } + + try { + await access(fullPath, constants.F_OK); + validPaths.push(fullPath); + } catch { + // File doesn't exist, skip + } + } + + return validPaths; +} + +/** + * Async version of findWindowsExecutableViaWhere. + * Use this in async contexts to avoid blocking the main process. + * + * Find a Windows executable using the `where` command. + * This is the most reliable method as it searches: + * - All directories in PATH + * - App Paths registry entries + * - Current directory + * + * Works regardless of where the tool is installed (custom paths, different drives, etc.) + * + * @param executable - The executable name (e.g., 'git', 'gh', 'python') + * @param logPrefix - Prefix for console logging + * @returns The full path to the executable, or null if not found + */ +export async function findWindowsExecutableViaWhereAsync( + executable: string, + logPrefix: string = '[Windows Where]' +): Promise { + if (process.platform !== 'win32') { + return null; + } + + // Security: Only allow simple executable names (alphanumeric, dash, underscore, dot) + if (!/^[\w.-]+$/.test(executable)) { + console.warn(`${logPrefix} Invalid executable name: ${executable}`); + return null; + } + + try { + // Use 'where' command to find the executable + // where.exe is a built-in Windows command that finds executables + const { stdout } = await execFileAsync('where.exe', [executable], { + encoding: 'utf-8', + timeout: 5000, + windowsHide: true, + }); + + // 'where' returns multiple paths separated by newlines if found in multiple locations + // We take the first one (highest priority in PATH) + const paths = stdout.trim().split(/\r?\n/).filter(p => p.trim()); + + if (paths.length > 0) { + const foundPath = paths[0].trim(); + + // Validate the path exists and is secure + try { + await access(foundPath, constants.F_OK); + if (isSecurePath(foundPath)) { + console.log(`${logPrefix} Found via where: ${foundPath}`); + return foundPath; + } + } catch { + // Path doesn't exist + } + } + + return null; + } catch { + // 'where' returns exit code 1 if not found, which throws an error + return null; + } +} diff --git a/apps/frontend/src/main/worktree-paths.ts b/apps/frontend/src/main/worktree-paths.ts new file mode 100644 index 0000000000..b7e3d02e3e --- /dev/null +++ b/apps/frontend/src/main/worktree-paths.ts @@ -0,0 +1,94 @@ +/** + * Shared worktree path utilities + * + * Centralizes all worktree path constants and helper functions to avoid duplication + * and ensure consistent path handling across the application. 
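+ *
+ * Usage sketch (illustrative spec id):
+ *   // resolves to "<project>/.auto-claude/worktrees/tasks/042-example-spec"
+ *   const worktree = getTaskWorktreePath(projectPath, '042-example-spec');
+ *   // falls back to the legacy ".worktrees/042-example-spec" location if only that exists
+ *   const existing = findTaskWorktree(projectPath, '042-example-spec');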
+ */ + +import path from 'path'; +import { existsSync } from 'fs'; + +// Path constants for worktree directories +export const TASK_WORKTREE_DIR = '.auto-claude/worktrees/tasks'; +export const TERMINAL_WORKTREE_DIR = '.auto-claude/worktrees/terminal'; + +// Metadata directories (separate from git worktrees to avoid uncommitted files) +export const TERMINAL_WORKTREE_METADATA_DIR = '.auto-claude/terminal/metadata'; + +// Legacy path for backwards compatibility +export const LEGACY_WORKTREE_DIR = '.worktrees'; + +/** + * Get the task worktrees directory path + */ +export function getTaskWorktreeDir(projectPath: string): string { + return path.join(projectPath, TASK_WORKTREE_DIR); +} + +/** + * Get the full path for a specific task worktree + */ +export function getTaskWorktreePath(projectPath: string, specId: string): string { + return path.join(projectPath, TASK_WORKTREE_DIR, specId); +} + +/** + * Find a task worktree path, checking new location first then legacy + * Returns the path if found, null otherwise + */ +export function findTaskWorktree(projectPath: string, specId: string): string | null { + // Check new path first + const newPath = path.join(projectPath, TASK_WORKTREE_DIR, specId); + if (existsSync(newPath)) return newPath; + + // Legacy fallback + const legacyPath = path.join(projectPath, LEGACY_WORKTREE_DIR, specId); + if (existsSync(legacyPath)) return legacyPath; + + return null; +} + +/** + * Get the terminal worktrees directory path + */ +export function getTerminalWorktreeDir(projectPath: string): string { + return path.join(projectPath, TERMINAL_WORKTREE_DIR); +} + +/** + * Get the full path for a specific terminal worktree + */ +export function getTerminalWorktreePath(projectPath: string, name: string): string { + return path.join(projectPath, TERMINAL_WORKTREE_DIR, name); +} + +/** + * Find a terminal worktree path, checking new location first then legacy + * Returns the path if found, null otherwise + */ +export function findTerminalWorktree(projectPath: string, name: string): string | null { + // Check new path first + const newPath = path.join(projectPath, TERMINAL_WORKTREE_DIR, name); + if (existsSync(newPath)) return newPath; + + // Legacy fallback (terminal worktrees used terminal-{name} prefix) + const legacyPath = path.join(projectPath, LEGACY_WORKTREE_DIR, `terminal-${name}`); + if (existsSync(legacyPath)) return legacyPath; + + return null; +} + +/** + * Get the terminal worktree metadata directory path + * This is separate from the git worktree to avoid uncommitted files + */ +export function getTerminalWorktreeMetadataDir(projectPath: string): string { + return path.join(projectPath, TERMINAL_WORKTREE_METADATA_DIR); +} + +/** + * Get the metadata file path for a specific terminal worktree + */ +export function getTerminalWorktreeMetadataPath(projectPath: string, name: string): string { + return path.join(projectPath, TERMINAL_WORKTREE_METADATA_DIR, `${name}.json`); +} diff --git a/apps/frontend/src/preload/api/agent-api.ts b/apps/frontend/src/preload/api/agent-api.ts index c4ae68ff15..f9af4fadfb 100644 --- a/apps/frontend/src/preload/api/agent-api.ts +++ b/apps/frontend/src/preload/api/agent-api.ts @@ -8,7 +8,6 @@ * - Changelog operations * - Linear integration * - GitHub integration - * - Auto-build source updates * - Shell operations */ @@ -19,7 +18,6 @@ import { createChangelogAPI, ChangelogAPI } from './modules/changelog-api'; import { createLinearAPI, LinearAPI } from './modules/linear-api'; import { createGitHubAPI, GitHubAPI } from 
'./modules/github-api'; import { createGitLabAPI, GitLabAPI } from './modules/gitlab-api'; -import { createAutoBuildAPI, AutoBuildAPI } from './modules/autobuild-api'; import { createShellAPI, ShellAPI } from './modules/shell-api'; /** @@ -34,7 +32,6 @@ export interface AgentAPI extends LinearAPI, GitHubAPI, GitLabAPI, - AutoBuildAPI, ShellAPI {} /** @@ -50,7 +47,6 @@ export const createAgentAPI = (): AgentAPI => { const linearAPI = createLinearAPI(); const githubAPI = createGitHubAPI(); const gitlabAPI = createGitLabAPI(); - const autobuildAPI = createAutoBuildAPI(); const shellAPI = createShellAPI(); return { @@ -75,9 +71,6 @@ export const createAgentAPI = (): AgentAPI => { // GitLab Integration API ...gitlabAPI, - // Auto-Build Source Update API - ...autobuildAPI, - // Shell Operations API ...shellAPI }; @@ -92,6 +85,5 @@ export type { LinearAPI, GitHubAPI, GitLabAPI, - AutoBuildAPI, ShellAPI }; diff --git a/apps/frontend/src/preload/api/app-update-api.ts b/apps/frontend/src/preload/api/app-update-api.ts index 3ba20af7d6..313c16eded 100644 --- a/apps/frontend/src/preload/api/app-update-api.ts +++ b/apps/frontend/src/preload/api/app-update-api.ts @@ -16,6 +16,7 @@ export interface AppUpdateAPI { // Operations checkAppUpdate: () => Promise>; downloadAppUpdate: () => Promise; + downloadStableUpdate: () => Promise; installAppUpdate: () => void; getAppVersion: () => Promise; @@ -29,6 +30,9 @@ export interface AppUpdateAPI { onAppUpdateProgress: ( callback: (progress: AppUpdateProgress) => void ) => IpcListenerCleanup; + onAppUpdateStableDowngrade: ( + callback: (info: AppUpdateInfo) => void + ) => IpcListenerCleanup; } /** @@ -42,6 +46,9 @@ export const createAppUpdateAPI = (): AppUpdateAPI => ({ downloadAppUpdate: (): Promise => invokeIpc(IPC_CHANNELS.APP_UPDATE_DOWNLOAD), + downloadStableUpdate: (): Promise => + invokeIpc(IPC_CHANNELS.APP_UPDATE_DOWNLOAD_STABLE), + installAppUpdate: (): void => { invokeIpc(IPC_CHANNELS.APP_UPDATE_INSTALL); }, @@ -63,5 +70,10 @@ export const createAppUpdateAPI = (): AppUpdateAPI => ({ onAppUpdateProgress: ( callback: (progress: AppUpdateProgress) => void ): IpcListenerCleanup => - createIpcListener(IPC_CHANNELS.APP_UPDATE_PROGRESS, callback) + createIpcListener(IPC_CHANNELS.APP_UPDATE_PROGRESS, callback), + + onAppUpdateStableDowngrade: ( + callback: (info: AppUpdateInfo) => void + ): IpcListenerCleanup => + createIpcListener(IPC_CHANNELS.APP_UPDATE_STABLE_DOWNGRADE, callback) }); diff --git a/apps/frontend/src/preload/api/index.ts b/apps/frontend/src/preload/api/index.ts index 51e28c76ae..5e01084ace 100644 --- a/apps/frontend/src/preload/api/index.ts +++ b/apps/frontend/src/preload/api/index.ts @@ -12,6 +12,7 @@ import { GitLabAPI, createGitLabAPI } from './modules/gitlab-api'; import { DebugAPI, createDebugAPI } from './modules/debug-api'; import { ClaudeCodeAPI, createClaudeCodeAPI } from './modules/claude-code-api'; import { McpAPI, createMcpAPI } from './modules/mcp-api'; +import { ProfileAPI, createProfileAPI } from './profile-api'; export interface ElectronAPI extends ProjectAPI, @@ -26,7 +27,8 @@ export interface ElectronAPI extends GitLabAPI, DebugAPI, ClaudeCodeAPI, - McpAPI { + McpAPI, + ProfileAPI { github: GitHubAPI; } @@ -44,6 +46,7 @@ export const createElectronAPI = (): ElectronAPI => ({ ...createDebugAPI(), ...createClaudeCodeAPI(), ...createMcpAPI(), + ...createProfileAPI(), github: createGitHubAPI() }); @@ -58,6 +61,7 @@ export { createIdeationAPI, createInsightsAPI, createAppUpdateAPI, + createProfileAPI, createGitHubAPI, 
createGitLabAPI, createDebugAPI, @@ -75,6 +79,7 @@ export type { IdeationAPI, InsightsAPI, AppUpdateAPI, + ProfileAPI, GitHubAPI, GitLabAPI, DebugAPI, diff --git a/apps/frontend/src/preload/api/modules/autobuild-api.ts b/apps/frontend/src/preload/api/modules/autobuild-api.ts deleted file mode 100644 index e0e7aca6a6..0000000000 --- a/apps/frontend/src/preload/api/modules/autobuild-api.ts +++ /dev/null @@ -1,43 +0,0 @@ -import { IPC_CHANNELS } from '../../../shared/constants'; -import type { - AutoBuildSourceUpdateCheck, - AutoBuildSourceUpdateProgress, - IPCResult -} from '../../../shared/types'; -import { createIpcListener, invokeIpc, sendIpc, IpcListenerCleanup } from './ipc-utils'; - -/** - * Auto-Build Source Update API operations - */ -export interface AutoBuildAPI { - // Operations - checkAutoBuildSourceUpdate: () => Promise>; - downloadAutoBuildSourceUpdate: () => void; - getAutoBuildSourceVersion: () => Promise>; - - // Event Listeners - onAutoBuildSourceUpdateProgress: ( - callback: (progress: AutoBuildSourceUpdateProgress) => void - ) => IpcListenerCleanup; -} - -/** - * Creates the Auto-Build Source Update API implementation - */ -export const createAutoBuildAPI = (): AutoBuildAPI => ({ - // Operations - checkAutoBuildSourceUpdate: (): Promise> => - invokeIpc(IPC_CHANNELS.AUTOBUILD_SOURCE_CHECK), - - downloadAutoBuildSourceUpdate: (): void => - sendIpc(IPC_CHANNELS.AUTOBUILD_SOURCE_DOWNLOAD), - - getAutoBuildSourceVersion: (): Promise> => - invokeIpc(IPC_CHANNELS.AUTOBUILD_SOURCE_VERSION), - - // Event Listeners - onAutoBuildSourceUpdateProgress: ( - callback: (progress: AutoBuildSourceUpdateProgress) => void - ): IpcListenerCleanup => - createIpcListener(IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS, callback) -}); diff --git a/apps/frontend/src/preload/api/modules/github-api.ts b/apps/frontend/src/preload/api/modules/github-api.ts index 7436f87345..c08c834a35 100644 --- a/apps/frontend/src/preload/api/modules/github-api.ts +++ b/apps/frontend/src/preload/api/modules/github-api.ts @@ -125,6 +125,26 @@ export interface AnalyzePreviewResult { error?: string; } +/** + * Workflow run awaiting approval (for fork PRs) + */ +export interface WorkflowAwaitingApproval { + id: number; + name: string; + html_url: string; + workflow_name: string; +} + +/** + * Workflows awaiting approval result + */ +export interface WorkflowsAwaitingApprovalResult { + awaiting_approval: number; + workflow_runs: WorkflowAwaitingApproval[]; + can_approve: boolean; + error?: string; +} + /** * GitHub Integration API operations */ @@ -234,23 +254,30 @@ export interface GitHubAPI { ) => IpcListenerCleanup; // PR operations - listPRs: (projectId: string) => Promise; + listPRs: (projectId: string, page?: number) => Promise; + getPR: (projectId: string, prNumber: number) => Promise; runPRReview: (projectId: string, prNumber: number) => void; cancelPRReview: (projectId: string, prNumber: number) => Promise; - postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[]) => Promise; + postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }) => Promise; deletePRReview: (projectId: string, prNumber: number) => Promise; postPRComment: (projectId: string, prNumber: number, body: string) => Promise; mergePR: (projectId: string, prNumber: number, mergeMethod?: 'merge' | 'squash' | 'rebase') => Promise; assignPR: (projectId: string, prNumber: number, username: string) => Promise; getPRReview: (projectId: string, prNumber: number) => Promise; + 
getPRReviewsBatch: (projectId: string, prNumbers: number[]) => Promise>; // Follow-up review operations checkNewCommits: (projectId: string, prNumber: number) => Promise; + checkMergeReadiness: (projectId: string, prNumber: number) => Promise; runFollowupReview: (projectId: string, prNumber: number) => void; // PR logs getPRLogs: (projectId: string, prNumber: number) => Promise; + // Workflow approval (for fork PRs) + getWorkflowsAwaitingApproval: (projectId: string, prNumber: number) => Promise; + approveWorkflow: (projectId: string, runId: number) => Promise; + // PR event listeners onPRReviewProgress: ( callback: (projectId: string, progress: PRReviewProgress) => void @@ -320,6 +347,7 @@ export interface PRReviewResult { error?: string; // Follow-up review fields reviewedCommitSha?: string; + reviewedFileBlobs?: Record; // filename โ†’ blob SHA for rebase-resistant follow-ups isFollowupReview?: boolean; previousReviewId?: number; resolvedFindings?: string[]; @@ -343,6 +371,21 @@ export interface NewCommitsCheck { hasCommitsAfterPosting?: boolean; } +/** + * Lightweight merge readiness check result + * Used for real-time validation of AI verdict freshness + */ +export interface MergeReadiness { + /** PR is in draft mode */ + isDraft: boolean; + /** GitHub's mergeable status */ + mergeable: 'MERGEABLE' | 'CONFLICTING' | 'UNKNOWN'; + /** Simplified CI status */ + ciStatus: 'passing' | 'failing' | 'pending' | 'none'; + /** List of blockers that contradict a "ready to merge" verdict */ + blockers: string[]; +} + /** * Review progress status */ @@ -585,8 +628,11 @@ export const createGitHubAPI = (): GitHubAPI => ({ createIpcListener(IPC_CHANNELS.GITHUB_AUTOFIX_ANALYZE_PREVIEW_ERROR, callback), // PR operations - listPRs: (projectId: string): Promise => - invokeIpc(IPC_CHANNELS.GITHUB_PR_LIST, projectId), + listPRs: (projectId: string, page: number = 1): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_LIST, projectId, page), + + getPR: (projectId: string, prNumber: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_GET, projectId, prNumber), runPRReview: (projectId: string, prNumber: number): void => sendIpc(IPC_CHANNELS.GITHUB_PR_REVIEW, projectId, prNumber), @@ -594,8 +640,8 @@ export const createGitHubAPI = (): GitHubAPI => ({ cancelPRReview: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_REVIEW_CANCEL, projectId, prNumber), - postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[]): Promise => - invokeIpc(IPC_CHANNELS.GITHUB_PR_POST_REVIEW, projectId, prNumber, selectedFindingIds), + postPRReview: (projectId: string, prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_POST_REVIEW, projectId, prNumber, selectedFindingIds, options), deletePRReview: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_DELETE_REVIEW, projectId, prNumber), @@ -612,10 +658,16 @@ export const createGitHubAPI = (): GitHubAPI => ({ getPRReview: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_GET_REVIEW, projectId, prNumber), + getPRReviewsBatch: (projectId: string, prNumbers: number[]): Promise> => + invokeIpc(IPC_CHANNELS.GITHUB_PR_GET_REVIEWS_BATCH, projectId, prNumbers), + // Follow-up review operations checkNewCommits: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_CHECK_NEW_COMMITS, projectId, prNumber), + checkMergeReadiness: (projectId: string, prNumber: number): 
Promise => + invokeIpc(IPC_CHANNELS.GITHUB_PR_CHECK_MERGE_READINESS, projectId, prNumber), + runFollowupReview: (projectId: string, prNumber: number): void => sendIpc(IPC_CHANNELS.GITHUB_PR_FOLLOWUP_REVIEW, projectId, prNumber), @@ -623,6 +675,13 @@ export const createGitHubAPI = (): GitHubAPI => ({ getPRLogs: (projectId: string, prNumber: number): Promise => invokeIpc(IPC_CHANNELS.GITHUB_PR_GET_LOGS, projectId, prNumber), + // Workflow approval (for fork PRs) + getWorkflowsAwaitingApproval: (projectId: string, prNumber: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_WORKFLOWS_AWAITING_APPROVAL, projectId, prNumber), + + approveWorkflow: (projectId: string, runId: number): Promise => + invokeIpc(IPC_CHANNELS.GITHUB_WORKFLOW_APPROVE, projectId, runId), + // PR event listeners onPRReviewProgress: ( callback: (projectId: string, progress: PRReviewProgress) => void diff --git a/apps/frontend/src/preload/api/modules/index.ts b/apps/frontend/src/preload/api/modules/index.ts index 48b4f8b2cf..e2cc553781 100644 --- a/apps/frontend/src/preload/api/modules/index.ts +++ b/apps/frontend/src/preload/api/modules/index.ts @@ -11,6 +11,5 @@ export * from './insights-api'; export * from './changelog-api'; export * from './linear-api'; export * from './github-api'; -export * from './autobuild-api'; export * from './shell-api'; export * from './debug-api'; diff --git a/apps/frontend/src/preload/api/profile-api.ts b/apps/frontend/src/preload/api/profile-api.ts new file mode 100644 index 0000000000..e285c6f10a --- /dev/null +++ b/apps/frontend/src/preload/api/profile-api.ts @@ -0,0 +1,144 @@ +import { ipcRenderer } from 'electron'; +import { IPC_CHANNELS } from '../../shared/constants'; +import type { IPCResult } from '../../shared/types'; +import type { + APIProfile, + ProfileFormData, + ProfilesFile, + TestConnectionResult, + DiscoverModelsResult +} from '@shared/types/profile'; + +export interface ProfileAPI { + // Get all profiles + getAPIProfiles: () => Promise>; + + // Save/create a profile + saveAPIProfile: ( + profile: ProfileFormData + ) => Promise>; + + // Update an existing profile + updateAPIProfile: ( + profile: APIProfile + ) => Promise>; + + // Delete a profile + deleteAPIProfile: (profileId: string) => Promise; + + // Set active profile (null to switch to OAuth) + setActiveAPIProfile: (profileId: string | null) => Promise; + + // Test API profile connection + testConnection: ( + baseUrl: string, + apiKey: string, + signal?: AbortSignal + ) => Promise>; + + // Discover available models from API + discoverModels: ( + baseUrl: string, + apiKey: string, + signal?: AbortSignal + ) => Promise>; +} + +let testConnectionRequestId = 0; +let discoverModelsRequestId = 0; + +export const createProfileAPI = (): ProfileAPI => ({ + // Get all profiles + getAPIProfiles: (): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_GET), + + // Save/create a profile + saveAPIProfile: ( + profile: ProfileFormData + ): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_SAVE, profile), + + // Update an existing profile + updateAPIProfile: ( + profile: APIProfile + ): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_UPDATE, profile), + + // Delete a profile + deleteAPIProfile: (profileId: string): Promise => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_DELETE, profileId), + + // Set active profile (null to switch to OAuth) + setActiveAPIProfile: (profileId: string | null): Promise => + ipcRenderer.invoke(IPC_CHANNELS.PROFILES_SET_ACTIVE, profileId), + + // Test API profile connection + testConnection: ( + 
baseUrl: string, + apiKey: string, + signal?: AbortSignal + ): Promise> => { + const requestId = ++testConnectionRequestId; + + // Check if already aborted before initiating request + if (signal && signal.aborted) { + return Promise.reject(new DOMException('The operation was aborted.', 'AbortError')); + } + + // Setup abort listener AFTER checking aborted status to avoid race condition + if (signal && typeof signal.addEventListener === 'function') { + try { + signal.addEventListener('abort', () => { + ipcRenderer.send(IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL, requestId); + }, { once: true }); + } catch (err) { + console.error('[preload/profile-api] Error adding abort listener:', err); + } + } else if (signal) { + console.warn('[preload/profile-api] signal provided but addEventListener not available - signal may have been serialized'); + } + + return ipcRenderer.invoke(IPC_CHANNELS.PROFILES_TEST_CONNECTION, baseUrl, apiKey, requestId); + }, + + // Discover available models from API + discoverModels: ( + baseUrl: string, + apiKey: string, + signal?: AbortSignal + ): Promise> => { + console.log('[preload/profile-api] discoverModels START'); + console.log('[preload/profile-api] baseUrl, apiKey:', baseUrl, apiKey?.slice(-4)); + + const requestId = ++discoverModelsRequestId; + console.log('[preload/profile-api] Request ID:', requestId); + + // Check if already aborted before initiating request + if (signal && signal.aborted) { + console.log('[preload/profile-api] Already aborted, rejecting'); + return Promise.reject(new DOMException('The operation was aborted.', 'AbortError')); + } + + // Setup abort listener AFTER checking aborted status to avoid race condition + if (signal && typeof signal.addEventListener === 'function') { + console.log('[preload/profile-api] Setting up abort listener...'); + try { + signal.addEventListener('abort', () => { + console.log('[preload/profile-api] Abort signal received for request:', requestId); + ipcRenderer.send(IPC_CHANNELS.PROFILES_DISCOVER_MODELS_CANCEL, requestId); + }, { once: true }); + console.log('[preload/profile-api] Abort listener added successfully'); + } catch (err) { + console.error('[preload/profile-api] Error adding abort listener:', err); + } + } else if (signal) { + console.warn('[preload/profile-api] signal provided but addEventListener not available - signal may have been serialized'); + } + + const channel = 'profiles:discover-models'; + console.log('[preload/profile-api] About to invoke IPC channel:', channel); + const promise = ipcRenderer.invoke(channel, baseUrl, apiKey, requestId); + console.log('[preload/profile-api] IPC invoke called, promise returned'); + return promise; + } +}); diff --git a/apps/frontend/src/preload/api/settings-api.ts b/apps/frontend/src/preload/api/settings-api.ts index 263c32d084..1c1f8752f9 100644 --- a/apps/frontend/src/preload/api/settings-api.ts +++ b/apps/frontend/src/preload/api/settings-api.ts @@ -28,6 +28,11 @@ export interface SettingsAPI { getSourceEnv: () => Promise>; updateSourceEnv: (config: { claudeOAuthToken?: string }) => Promise; checkSourceToken: () => Promise>; + + // Sentry error reporting + notifySentryStateChanged: (enabled: boolean) => void; + getSentryDsn: () => Promise; + getSentryConfig: () => Promise<{ dsn: string; tracesSampleRate: number; profilesSampleRate: number }>; } export const createSettingsAPI = (): SettingsAPI => ({ @@ -59,5 +64,17 @@ export const createSettingsAPI = (): SettingsAPI => ({ ipcRenderer.invoke(IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE, config), 
checkSourceToken: (): Promise> => - ipcRenderer.invoke(IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN) + ipcRenderer.invoke(IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN), + + // Sentry error reporting - notify main process when setting changes + notifySentryStateChanged: (enabled: boolean): void => + ipcRenderer.send(IPC_CHANNELS.SENTRY_STATE_CHANGED, enabled), + + // Get Sentry DSN from main process (loaded from environment variable) + getSentryDsn: (): Promise => + ipcRenderer.invoke(IPC_CHANNELS.GET_SENTRY_DSN), + + // Get full Sentry config from main process (DSN + sample rates) + getSentryConfig: (): Promise<{ dsn: string; tracesSampleRate: number; profilesSampleRate: number }> => + ipcRenderer.invoke(IPC_CHANNELS.GET_SENTRY_CONFIG) }); diff --git a/apps/frontend/src/preload/api/task-api.ts b/apps/frontend/src/preload/api/task-api.ts index 6049f85b75..c2f3a6781a 100644 --- a/apps/frontend/src/preload/api/task-api.ts +++ b/apps/frontend/src/preload/api/task-api.ts @@ -59,12 +59,13 @@ export interface TaskAPI { unarchiveTasks: (projectId: string, taskIds: string[]) => Promise>; // Task Event Listeners - onTaskProgress: (callback: (taskId: string, plan: ImplementationPlan) => void) => () => void; - onTaskError: (callback: (taskId: string, error: string) => void) => () => void; - onTaskLog: (callback: (taskId: string, log: string) => void) => () => void; - onTaskStatusChange: (callback: (taskId: string, status: TaskStatus) => void) => () => void; + // Note: projectId is optional for backward compatibility - events without projectId will still work + onTaskProgress: (callback: (taskId: string, plan: ImplementationPlan, projectId?: string) => void) => () => void; + onTaskError: (callback: (taskId: string, error: string, projectId?: string) => void) => () => void; + onTaskLog: (callback: (taskId: string, log: string, projectId?: string) => void) => () => void; + onTaskStatusChange: (callback: (taskId: string, status: TaskStatus, projectId?: string) => void) => () => void; onTaskExecutionProgress: ( - callback: (taskId: string, progress: import('../../shared/types').ExecutionProgress) => void + callback: (taskId: string, progress: import('../../shared/types').ExecutionProgress, projectId?: string) => void ) => () => void; // Task Phase Logs @@ -161,14 +162,15 @@ export const createTaskAPI = (): TaskAPI => ({ // Task Event Listeners onTaskProgress: ( - callback: (taskId: string, plan: ImplementationPlan) => void + callback: (taskId: string, plan: ImplementationPlan, projectId?: string) => void ): (() => void) => { const handler = ( _event: Electron.IpcRendererEvent, taskId: string, - plan: ImplementationPlan + plan: ImplementationPlan, + projectId?: string ): void => { - callback(taskId, plan); + callback(taskId, plan, projectId); }; ipcRenderer.on(IPC_CHANNELS.TASK_PROGRESS, handler); return () => { @@ -177,14 +179,15 @@ export const createTaskAPI = (): TaskAPI => ({ }, onTaskError: ( - callback: (taskId: string, error: string) => void + callback: (taskId: string, error: string, projectId?: string) => void ): (() => void) => { const handler = ( _event: Electron.IpcRendererEvent, taskId: string, - error: string + error: string, + projectId?: string ): void => { - callback(taskId, error); + callback(taskId, error, projectId); }; ipcRenderer.on(IPC_CHANNELS.TASK_ERROR, handler); return () => { @@ -193,14 +196,15 @@ export const createTaskAPI = (): TaskAPI => ({ }, onTaskLog: ( - callback: (taskId: string, log: string) => void + callback: (taskId: string, log: string, projectId?: string) => void 
): (() => void) => { const handler = ( _event: Electron.IpcRendererEvent, taskId: string, - log: string + log: string, + projectId?: string ): void => { - callback(taskId, log); + callback(taskId, log, projectId); }; ipcRenderer.on(IPC_CHANNELS.TASK_LOG, handler); return () => { @@ -209,14 +213,15 @@ export const createTaskAPI = (): TaskAPI => ({ }, onTaskStatusChange: ( - callback: (taskId: string, status: TaskStatus) => void + callback: (taskId: string, status: TaskStatus, projectId?: string) => void ): (() => void) => { const handler = ( _event: Electron.IpcRendererEvent, taskId: string, - status: TaskStatus + status: TaskStatus, + projectId?: string ): void => { - callback(taskId, status); + callback(taskId, status, projectId); }; ipcRenderer.on(IPC_CHANNELS.TASK_STATUS_CHANGE, handler); return () => { @@ -225,14 +230,15 @@ export const createTaskAPI = (): TaskAPI => ({ }, onTaskExecutionProgress: ( - callback: (taskId: string, progress: import('../../shared/types').ExecutionProgress) => void + callback: (taskId: string, progress: import('../../shared/types').ExecutionProgress, projectId?: string) => void ): (() => void) => { const handler = ( _event: Electron.IpcRendererEvent, taskId: string, - progress: import('../../shared/types').ExecutionProgress + progress: import('../../shared/types').ExecutionProgress, + projectId?: string ): void => { - callback(taskId, progress); + callback(taskId, progress, projectId); }; ipcRenderer.on(IPC_CHANNELS.TASK_EXECUTION_PROGRESS, handler); return () => { diff --git a/apps/frontend/src/preload/api/terminal-api.ts b/apps/frontend/src/preload/api/terminal-api.ts index 14aaa3e507..7ea08f7177 100644 --- a/apps/frontend/src/preload/api/terminal-api.ts +++ b/apps/frontend/src/preload/api/terminal-api.ts @@ -1,12 +1,21 @@ import { ipcRenderer } from 'electron'; import { IPC_CHANNELS } from '../../shared/constants'; + +// Increase max listeners to accommodate 12 terminals with multiple event types +// Each terminal can have listeners for: output, exit, titleChange, claudeSession, etc. 
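The task-event hunks above add a trailing optional projectId to every preload callback while keeping the returned unsubscribe function. A minimal renderer-side sketch, assuming createTaskAPI() is exposed on window.electronAPI like the other preload APIs in this diff (activeProjectId and handlePlanUpdate are illustrative helpers, not part of the PR):

// Older callbacks that ignore the third argument keep working unchanged.
useEffect(() => {
  const unsubscribe = window.electronAPI.onTaskProgress(
    (taskId, plan, projectId) => {
      // Events emitted by older main-process code may omit projectId entirely.
      if (projectId !== undefined && projectId !== activeProjectId) return;
      handlePlanUpdate(taskId, plan);
    }
  );
  return unsubscribe; // removes the underlying ipcRenderer listener on unmount
}, [activeProjectId]);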
+// Default is 10, but with 12 terminals we need more headroom +ipcRenderer.setMaxListeners(50); + import type { IPCResult, TerminalCreateOptions, RateLimitInfo, ClaudeProfile, ClaudeProfileSettings, - ClaudeUsageSnapshot + ClaudeUsageSnapshot, + CreateTerminalWorktreeRequest, + TerminalWorktreeConfig, + TerminalWorktreeResult, } from '../../shared/types'; /** Type for proactive swap notification events */ @@ -25,6 +34,8 @@ export interface TerminalAPI { resizeTerminal: (id: string, cols: number, rows: number) => void; invokeClaudeInTerminal: (id: string, cwd?: string) => void; generateTerminalName: (command: string, cwd?: string) => Promise>; + setTerminalTitle: (id: string, title: string) => void; + setTerminalWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined) => void; // Terminal Session Management getTerminalSessions: (projectPath: string) => Promise>; @@ -35,6 +46,7 @@ export interface TerminalAPI { ) => Promise>; clearTerminalSessions: (projectPath: string) => Promise; resumeClaudeInTerminal: (id: string, sessionId?: string) => void; + activateDeferredClaudeResume: (id: string) => void; getTerminalSessionDates: (projectPath?: string) => Promise>; getTerminalSessionsForDate: ( date: string, @@ -48,6 +60,11 @@ export interface TerminalAPI { ) => Promise>; checkTerminalPtyAlive: (terminalId: string) => Promise>; + // Terminal Worktree Operations (isolated development) + createTerminalWorktree: (request: CreateTerminalWorktreeRequest) => Promise; + listTerminalWorktrees: (projectPath: string) => Promise>; + removeTerminalWorktree: (projectPath: string, name: string, deleteBranch?: boolean) => Promise; + // Terminal Event Listeners onTerminalOutput: (callback: (id: string, data: string) => void) => () => void; onTerminalExit: (callback: (id: string, exitCode: number) => void) => () => void; @@ -57,6 +74,11 @@ export interface TerminalAPI { onTerminalOAuthToken: ( callback: (info: { terminalId: string; profileId?: string; email?: string; success: boolean; message?: string; detectedAt: string }) => void ) => () => void; + onTerminalAuthCreated: ( + callback: (info: { terminalId: string; profileId: string; profileName: string }) => void + ) => () => void; + onTerminalClaudeBusy: (callback: (id: string, isBusy: boolean) => void) => () => void; + onTerminalPendingResume: (callback: (id: string, sessionId?: string) => void) => () => void; // Claude Profile Management getClaudeProfiles: () => Promise>; @@ -100,6 +122,12 @@ export const createTerminalAPI = (): TerminalAPI => ({ generateTerminalName: (command: string, cwd?: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_GENERATE_NAME, command, cwd), + setTerminalTitle: (id: string, title: string): void => + ipcRenderer.send(IPC_CHANNELS.TERMINAL_SET_TITLE, id, title), + + setTerminalWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined): void => + ipcRenderer.send(IPC_CHANNELS.TERMINAL_SET_WORKTREE_CONFIG, id, config), + // Terminal Session Management getTerminalSessions: (projectPath: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_GET_SESSIONS, projectPath), @@ -117,6 +145,9 @@ export const createTerminalAPI = (): TerminalAPI => ({ resumeClaudeInTerminal: (id: string, sessionId?: string): void => ipcRenderer.send(IPC_CHANNELS.TERMINAL_RESUME_CLAUDE, id, sessionId), + activateDeferredClaudeResume: (id: string): void => + ipcRenderer.send(IPC_CHANNELS.TERMINAL_ACTIVATE_DEFERRED_RESUME, id), + getTerminalSessionDates: (projectPath?: string): Promise> => 
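For context on the setMaxListeners(50) bump above: each of the up-to-12 terminals registers its own handler on the same shared ipcRenderer channels, so a single channel can exceed Node's default of 10 listeners per event. Illustrative sketch only; the channel string stands in for the real IPC_CHANNELS.TERMINAL_OUTPUT constant:

// Without the bump, adding the 11th listener on one channel logs a
// MaxListenersExceededWarning (a console warning, not an error).
for (let i = 0; i < 12; i++) {
  ipcRenderer.on('terminal:output', () => { /* per-terminal output handler */ });
}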
ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_GET_SESSION_DATES, projectPath), @@ -137,6 +168,16 @@ export const createTerminalAPI = (): TerminalAPI => ({ checkTerminalPtyAlive: (terminalId: string): Promise> => ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_CHECK_PTY_ALIVE, terminalId), + // Terminal Worktree Operations (isolated development) + createTerminalWorktree: (request: CreateTerminalWorktreeRequest): Promise => + ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_CREATE, request), + + listTerminalWorktrees: (projectPath: string): Promise> => + ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_LIST, projectPath), + + removeTerminalWorktree: (projectPath: string, name: string, deleteBranch: boolean = false): Promise => + ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_REMOVE, projectPath, name, deleteBranch), + // Terminal Event Listeners onTerminalOutput: ( callback: (id: string, data: string) => void @@ -232,6 +273,53 @@ export const createTerminalAPI = (): TerminalAPI => ({ }; }, + onTerminalAuthCreated: ( + callback: (info: { terminalId: string; profileId: string; profileName: string }) => void + ): (() => void) => { + const handler = ( + _event: Electron.IpcRendererEvent, + info: { terminalId: string; profileId: string; profileName: string } + ): void => { + callback(info); + }; + ipcRenderer.on(IPC_CHANNELS.TERMINAL_AUTH_CREATED, handler); + return () => { + ipcRenderer.removeListener(IPC_CHANNELS.TERMINAL_AUTH_CREATED, handler); + }; + }, + + onTerminalClaudeBusy: ( + callback: (id: string, isBusy: boolean) => void + ): (() => void) => { + const handler = ( + _event: Electron.IpcRendererEvent, + id: string, + isBusy: boolean + ): void => { + callback(id, isBusy); + }; + ipcRenderer.on(IPC_CHANNELS.TERMINAL_CLAUDE_BUSY, handler); + return () => { + ipcRenderer.removeListener(IPC_CHANNELS.TERMINAL_CLAUDE_BUSY, handler); + }; + }, + + onTerminalPendingResume: ( + callback: (id: string, sessionId?: string) => void + ): (() => void) => { + const handler = ( + _event: Electron.IpcRendererEvent, + id: string, + sessionId?: string + ): void => { + callback(id, sessionId); + }; + ipcRenderer.on(IPC_CHANNELS.TERMINAL_PENDING_RESUME, handler); + return () => { + ipcRenderer.removeListener(IPC_CHANNELS.TERMINAL_PENDING_RESUME, handler); + }; + }, + // Claude Profile Management getClaudeProfiles: (): Promise> => ipcRenderer.invoke(IPC_CHANNELS.CLAUDE_PROFILES_GET), diff --git a/apps/frontend/src/renderer/App.tsx b/apps/frontend/src/renderer/App.tsx index e8a9289b56..e9bfca32d1 100644 --- a/apps/frontend/src/renderer/App.tsx +++ b/apps/frontend/src/renderer/App.tsx @@ -16,6 +16,7 @@ import { } from '@dnd-kit/sortable'; import { TooltipProvider } from './components/ui/tooltip'; import { Button } from './components/ui/button'; +import { Toaster } from './components/ui/toaster'; import { Dialog, DialogContent, @@ -51,7 +52,8 @@ import { ProactiveSwapListener } from './components/ProactiveSwapListener'; import { GitHubSetupModal } from './components/GitHubSetupModal'; import { useProjectStore, loadProjects, addProject, initializeProject, removeProject } from './stores/project-store'; import { useTaskStore, loadTasks } from './stores/task-store'; -import { useSettingsStore, loadSettings } from './stores/settings-store'; +import { useSettingsStore, loadSettings, loadProfiles } from './stores/settings-store'; +import { useClaudeProfileStore } from './stores/claude-profile-store'; import { useTerminalStore, restoreTerminalSessions } from './stores/terminal-store'; import { initializeGitHubListeners } from 
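The new worktree IPC surface gives the renderer three calls. A hedged usage sketch follows; the request fields are assumptions (CreateTerminalWorktreeRequest is defined in shared/types and not shown here) and the result shapes are left opaque:

// Sketch only: create an isolated worktree, list what exists, then remove it.
const projectPath = '/path/to/repo'; // illustrative
await window.electronAPI.createTerminalWorktree({
  projectPath,
  name: 'feature-x',   // assumed field
  baseBranch: 'main',  // assumed field
});
const worktrees = await window.electronAPI.listTerminalWorktrees(projectPath);
console.log(worktrees);
await window.electronAPI.removeTerminalWorktree(projectPath, 'feature-x', true); // also delete the branch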
'./stores/github'; import { initDownloadProgressListener } from './stores/download-store'; @@ -61,10 +63,9 @@ import { COLOR_THEMES, UI_SCALE_MIN, UI_SCALE_MAX, UI_SCALE_DEFAULT } from '../s import type { Task, Project, ColorTheme } from '../shared/types'; import { ProjectTabBar } from './components/ProjectTabBar'; import { AddProjectModal } from './components/AddProjectModal'; -import { ViewStateProvider, useViewState } from './contexts/ViewStateContext'; +import { ViewStateProvider } from './contexts/ViewStateContext'; -// Wrapper component that connects ProjectTabBar to ViewStateContext -// (needed because App renders the Provider and can't use useViewState directly) +// Wrapper component for ProjectTabBar interface ProjectTabBarWithContextProps { projects: Project[]; activeProjectId: string | null; @@ -72,7 +73,6 @@ interface ProjectTabBarWithContextProps { onProjectClose: (projectId: string) => void; onAddProject: () => void; onSettingsClick: () => void; - tasks: Task[]; } function ProjectTabBarWithContext({ @@ -81,12 +81,8 @@ function ProjectTabBarWithContext({ onProjectSelect, onProjectClose, onAddProject, - onSettingsClick, - tasks + onSettingsClick }: ProjectTabBarWithContextProps) { - const { showArchived, toggleShowArchived } = useViewState(); - const archivedCount = tasks.filter(t => t.metadata?.archivedAt).length; - return ( ); } @@ -119,6 +112,13 @@ export function App() { const settings = useSettingsStore((state) => state.settings); const settingsLoading = useSettingsStore((state) => state.isLoading); + // API Profile state + const profiles = useSettingsStore((state) => state.profiles); + const activeProfileId = useSettingsStore((state) => state.activeProfileId); + + // Claude Profile state (OAuth) + const claudeProfiles = useClaudeProfileStore((state) => state.profiles); + // UI State const [selectedTask, setSelectedTask] = useState(null); const [isNewTaskDialogOpen, setIsNewTaskDialogOpen] = useState(false); @@ -167,6 +167,7 @@ export function App() { useEffect(() => { loadProjects(); loadSettings(); + loadProfiles(); // Initialize global GitHub listeners (PR reviews, etc.) 
so they persist across navigation initializeGitHubListeners(); // Initialize global download progress listener for Ollama model downloads @@ -239,10 +240,21 @@ export function App() { // First-run detection - show onboarding wizard if not completed // Only check AFTER settings have been loaded from disk to avoid race condition useEffect(() => { - if (settingsHaveLoaded && settings.onboardingCompleted === false) { + // Check if either auth method is configured + // API profiles: if profiles exist, auth is configured (user has gone through setup) + const hasAPIProfileConfigured = profiles.length > 0; + const hasOAuthConfigured = claudeProfiles.some(p => + p.oauthToken || (p.isDefault && p.configDir) + ); + const hasAnyAuth = hasAPIProfileConfigured || hasOAuthConfigured; + + // Only show wizard if onboarding not completed AND no auth is configured + if (settingsHaveLoaded && + settings.onboardingCompleted === false && + !hasAnyAuth) { setIsOnboardingWizardOpen(true); } - }, [settingsHaveLoaded, settings.onboardingCompleted]); + }, [settingsHaveLoaded, settings.onboardingCompleted, profiles, claudeProfiles]); // Sync i18n language with settings const { t, i18n } = useTranslation('dialogs'); @@ -700,7 +712,6 @@ export function App() { onProjectClose={handleProjectTabClose} onAddProject={handleAddProject} onSettingsClick={() => setIsSettingsDialogOpen(true)} - tasks={tasks} /> @@ -1001,6 +1012,9 @@ export function App() { {/* Global Download Indicator - shows Ollama model download progress */} + + {/* Toast notifications */} + diff --git a/apps/frontend/src/renderer/components/AddFeatureDialog.tsx b/apps/frontend/src/renderer/components/AddFeatureDialog.tsx index d29e2b977e..d139298b93 100644 --- a/apps/frontend/src/renderer/components/AddFeatureDialog.tsx +++ b/apps/frontend/src/renderer/components/AddFeatureDialog.tsx @@ -208,6 +208,7 @@ export function AddFeatureDialog({ value={title} onChange={(e) => setTitle(e.target.value)} disabled={isSaving} + aria-required="true" /> @@ -223,6 +224,7 @@ export function AddFeatureDialog({ onChange={(e) => setDescription(e.target.value)} rows={3} disabled={isSaving} + aria-required="true" /> @@ -253,7 +255,7 @@ export function AddFeatureDialog({ onValueChange={setPhaseId} disabled={isSaving} > - + @@ -338,7 +340,7 @@ export function AddFeatureDialog({ {/* Error */} {error && ( -
+
{error}
diff --git a/apps/frontend/src/renderer/components/AddProjectModal.tsx b/apps/frontend/src/renderer/components/AddProjectModal.tsx index fa8db82c41..852f3febcd 100644 --- a/apps/frontend/src/renderer/components/AddProjectModal.tsx +++ b/apps/frontend/src/renderer/components/AddProjectModal.tsx @@ -167,6 +167,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje 'bg-card hover:bg-accent hover:border-accent transition-all duration-200', 'text-left group' )} + aria-label={t('addProject.openExistingAriaLabel')} >
@@ -188,6 +189,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje 'bg-card hover:bg-accent hover:border-accent transition-all duration-200', 'text-left group' )} + aria-label={t('addProject.createNewAriaLabel')} >
@@ -203,7 +205,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje
{error && ( -
+
{error}
)} @@ -272,7 +274,7 @@ export function AddProjectModal({ open, onOpenChange, onProjectAdded }: AddProje
{error && ( -
+
{error}
)} diff --git a/apps/frontend/src/renderer/components/AgentProfileSelector.tsx b/apps/frontend/src/renderer/components/AgentProfileSelector.tsx index 6d23cb97ad..fa74affa25 100644 --- a/apps/frontend/src/renderer/components/AgentProfileSelector.tsx +++ b/apps/frontend/src/renderer/components/AgentProfileSelector.tsx @@ -96,23 +96,18 @@ export function AgentProfileSelector({ if (selectedId === 'custom') { // Keep current model/thinking level, just mark as custom onProfileChange('custom', model as ModelType || 'sonnet', thinkingLevel as ThinkingLevel || 'medium'); - } else if (selectedId === 'auto') { - // Auto profile - set defaults - const autoProfile = DEFAULT_AGENT_PROFILES.find(p => p.id === 'auto'); - if (autoProfile) { - onProfileChange('auto', autoProfile.model, autoProfile.thinkingLevel); - // Initialize phase configs with defaults if callback provided - if (onPhaseModelsChange && autoProfile.phaseModels) { - onPhaseModelsChange(autoProfile.phaseModels); - } - if (onPhaseThinkingChange && autoProfile.phaseThinking) { - onPhaseThinkingChange(autoProfile.phaseThinking); - } - } } else { + // Select preset profile - all profiles now have phase configs const profile = DEFAULT_AGENT_PROFILES.find(p => p.id === selectedId); if (profile) { onProfileChange(profile.id, profile.model, profile.thinkingLevel); + // Initialize phase configs with profile defaults if callbacks provided + if (onPhaseModelsChange && profile.phaseModels) { + onPhaseModelsChange(profile.phaseModels); + } + if (onPhaseThinkingChange && profile.phaseThinking) { + onPhaseThinkingChange(profile.phaseThinking); + } } } }; @@ -193,10 +188,7 @@ export function AgentProfileSelector({
{profile.name} - {profile.isAutoProfile - ? '(per-phase optimization)' - : `(${modelLabel} + ${profile.thinkingLevel})` - } + ({modelLabel} + {profile.thinkingLevel})
@@ -221,8 +213,8 @@ export function AgentProfileSelector({

- {/* Auto Profile - Phase Configuration */} - {isAuto && ( + {/* Phase Configuration - shown for all preset profiles */} + {!isCustom && (
{/* Clickable Header */} + + +
+
+ Authentication + {isOAuth ? 'OAuth' : 'API Profile'} +
+ {!isOAuth && authStatus.name && ( + <> +
+
+ Using profile: {authStatus.name} +
+ + )} +
+ + + + ); +} diff --git a/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx b/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx index 0674400602..726982faf5 100644 --- a/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx +++ b/apps/frontend/src/renderer/components/ClaudeCodeStatusBadge.tsx @@ -294,9 +294,10 @@ export function ClaudeCodeStatusBadge({ className }: ClaudeCodeStatusBadgeProps) size="sm" className="w-full text-xs text-muted-foreground gap-1" onClick={() => window.electronAPI?.openExternal?.('https://claude.ai/code')} + aria-label={t('navigation:claudeCode.learnMoreAriaLabel', 'Learn more about Claude Code (opens in new window)')} > {t('navigation:claudeCode.learnMore', 'Learn more about Claude Code')} - +
diff --git a/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx b/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx index c71043d72b..53f47767f7 100644 --- a/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx +++ b/apps/frontend/src/renderer/components/CompetitorAnalysisViewer.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { TrendingUp, ExternalLink, AlertCircle } from 'lucide-react'; import { Dialog, @@ -21,6 +22,8 @@ export function CompetitorAnalysisViewer({ open, onOpenChange, }: CompetitorAnalysisViewerProps) { + const { t } = useTranslation('common'); + if (!analysis) return null; return ( @@ -66,9 +69,11 @@ export function CompetitorAnalysisViewer({ target="_blank" rel="noopener noreferrer" className="text-primary hover:underline flex items-center gap-1 text-sm ml-4" + aria-label={t('accessibility.visitExternalLink', { name: competitor.name })} > - +
diff --git a/apps/frontend/src/renderer/components/EnvConfigModal.tsx b/apps/frontend/src/renderer/components/EnvConfigModal.tsx index f7c95bff4d..f35138b819 100644 --- a/apps/frontend/src/renderer/components/EnvConfigModal.tsx +++ b/apps/frontend/src/renderer/components/EnvConfigModal.tsx @@ -1,4 +1,4 @@ -import { useState, useEffect } from 'react'; +import { useState, useEffect, useCallback } from 'react'; import { AlertCircle, Key, @@ -13,6 +13,7 @@ import { ChevronDown, ChevronRight } from 'lucide-react'; +import { useSettingsStore } from '../stores/settings-store'; import { Dialog, DialogContent, @@ -592,35 +593,51 @@ export function EnvConfigModal({ /** * Hook to check if the Claude token is configured * Returns { hasToken, isLoading, checkToken } + * + * This combines two sources of authentication: + * 1. OAuth token from source .env (checked via checkSourceToken) + * 2. Active API profile (custom Anthropic-compatible endpoint) */ export function useClaudeTokenCheck() { const [hasToken, setHasToken] = useState(null); const [isLoading, setIsLoading] = useState(true); const [error, setError] = useState(null); - const checkToken = async () => { + // Get active API profile from settings store + const activeProfileId = useSettingsStore((state) => state.activeProfileId); + + const checkToken = useCallback(async () => { setIsLoading(true); setError(null); + // Compute once - activeProfileId is captured from closure + const hasAPIProfile = !!activeProfileId; + try { const result = await window.electronAPI.checkSourceToken(); - if (result.success && result.data) { - setHasToken(result.data.hasToken); - } else { - setHasToken(false); + const hasSourceOAuthToken = result.success && result.data?.hasToken; + + // Auth is valid if either OAuth token OR API profile exists + setHasToken(hasSourceOAuthToken || hasAPIProfile); + + // Set error if OAuth check failed and no API profile fallback + if (!result.success && !hasAPIProfile) { setError(result.error || 'Failed to check token'); } } catch (err) { - setHasToken(false); - setError(err instanceof Error ? err.message : 'Unknown error'); + // Even if OAuth check fails, API profile is still valid auth + setHasToken(hasAPIProfile); + if (!hasAPIProfile) { + setError(err instanceof Error ? 
err.message : 'Unknown error'); + } } finally { setIsLoading(false); } - }; + }, [activeProfileId]); useEffect(() => { checkToken(); - }, []); + }, [checkToken]); // Re-check when checkToken changes (i.e., when activeProfileId changes) return { hasToken, isLoading, error, checkToken }; } diff --git a/apps/frontend/src/renderer/components/FileExplorerPanel.tsx b/apps/frontend/src/renderer/components/FileExplorerPanel.tsx index e7c0b98042..5c598d842f 100644 --- a/apps/frontend/src/renderer/components/FileExplorerPanel.tsx +++ b/apps/frontend/src/renderer/components/FileExplorerPanel.tsx @@ -1,4 +1,5 @@ import { motion, AnimatePresence } from 'motion/react'; +import { useTranslation } from 'react-i18next'; import { X, FolderTree, RefreshCw } from 'lucide-react'; import { Button } from './ui/button'; import { ScrollArea } from './ui/scroll-area'; @@ -34,6 +35,7 @@ const contentVariants = { }; export function FileExplorerPanel({ projectPath }: FileExplorerPanelProps) { + const { t } = useTranslation('common'); const { isOpen, close, clearCache, loadDirectory } = useFileExplorerStore(); const handleRefresh = () => { @@ -80,17 +82,18 @@ export function FileExplorerPanel({ projectPath }: FileExplorerPanelProps) { size="icon" className="h-6 w-6" onClick={handleRefresh} - title="Refresh" + aria-label={t('buttons.refresh')} > - +
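A sketch of consuming the reworked useClaudeTokenCheck hook above; the wrapper component and copy are illustrative, not part of this PR:

import type { ReactNode } from 'react';

function TokenGate({ children }: { children: ReactNode }) {
  const { hasToken, isLoading, error, checkToken } = useClaudeTokenCheck();
  if (isLoading) return <p>Checking authentication…</p>;
  if (!hasToken) {
    return (
      <div>
        <p>{error ?? 'No OAuth token or API profile configured.'}</p>
        <button onClick={() => checkToken()}>Re-check</button>
      </div>
    );
  }
  return <>{children}</>;
}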
diff --git a/apps/frontend/src/renderer/components/FileTreeItem.tsx b/apps/frontend/src/renderer/components/FileTreeItem.tsx index e0af5b9486..d6273b5a2b 100644 --- a/apps/frontend/src/renderer/components/FileTreeItem.tsx +++ b/apps/frontend/src/renderer/components/FileTreeItem.tsx @@ -1,4 +1,5 @@ -import { useState, useRef, useEffect, type DragEvent } from 'react'; +import { useState, useRef, useEffect, type DragEvent, type KeyboardEvent } from 'react'; +import { useTranslation } from 'react-i18next'; import { ChevronRight, ChevronDown, Folder, File, FileCode, FileJson, FileText, FileImage, Loader2 } from 'lucide-react'; import { cn } from '../lib/utils'; import type { FileNode } from '../../shared/types'; @@ -70,6 +71,7 @@ export function FileTreeItem({ isLoading, onToggle, }: FileTreeItemProps) { + const { t } = useTranslation('common'); const [isDragging, setIsDragging] = useState(false); const dragImageRef = useRef(null); @@ -98,6 +100,16 @@ export function FileTreeItem({ } }; + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault(); + e.stopPropagation(); + if (node.isDirectory) { + onToggle(); + } + } + }; + const handleDragStart = (e: DragEvent) => { e.stopPropagation(); setIsDragging(true); @@ -147,37 +159,47 @@ export function FileTreeItem({ return (
{/* Expand/collapse chevron for directories */} {node.isDirectory ? ( ) : ( - +
) : ( -
+
{/* Personal account */} {githubUsername && ( diff --git a/apps/frontend/src/renderer/components/Insights.tsx b/apps/frontend/src/renderer/components/Insights.tsx index 72e01a9af8..3f3a9b5fe6 100644 --- a/apps/frontend/src/renderer/components/Insights.tsx +++ b/apps/frontend/src/renderer/components/Insights.tsx @@ -1,4 +1,5 @@ -import { useState, useEffect, useRef } from 'react'; +import { useState, useEffect, useRef, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; import { MessageSquare, Send, @@ -15,7 +16,7 @@ import { PanelLeftClose, PanelLeft } from 'lucide-react'; -import ReactMarkdown from 'react-markdown'; +import ReactMarkdown, { type Components } from 'react-markdown'; import remarkGfm from 'remark-gfm'; import { Button } from './ui/button'; import { Textarea } from './ui/textarea'; @@ -46,42 +47,40 @@ import { TASK_COMPLEXITY_COLORS } from '../../shared/constants'; -// Safe link renderer for ReactMarkdown to prevent phishing and ensure external links open safely -const SafeLink = ({ href, children, ...props }: React.AnchorHTMLAttributes) => { - // Validate URL - only allow http, https, and relative links - const isValidUrl = href && ( - href.startsWith('http://') || - href.startsWith('https://') || - href.startsWith('/') || - href.startsWith('#') - ); - - if (!isValidUrl) { - // For invalid or potentially malicious URLs, render as plain text - return {children}; - } - - // External links get security attributes - const isExternal = href?.startsWith('http://') || href?.startsWith('https://'); - - return ( - - {children} - - ); -}; +// createSafeLink - factory function that creates a SafeLink component with i18n support +const createSafeLink = (opensInNewWindowText: string) => { + return function SafeLink({ href, children, ...props }: React.AnchorHTMLAttributes) { + // Validate URL - only allow http, https, and relative links + const isValidUrl = href && ( + href.startsWith('http://') || + href.startsWith('https://') || + href.startsWith('/') || + href.startsWith('#') + ); + + if (!isValidUrl) { + // For invalid or potentially malicious URLs, render as plain text + return {children}; + } -// Markdown components with safe link rendering -const markdownComponents = { - a: SafeLink, + // External links get security attributes and accessibility indicator + const isExternal = href?.startsWith('http://') || href?.startsWith('https://'); + + return ( + + {children} + {isExternal && {opensInNewWindowText}} + + ); + }; }; interface InsightsProps { @@ -89,6 +88,7 @@ interface InsightsProps { } export function Insights({ projectId }: InsightsProps) { + const { t } = useTranslation('common'); const session = useInsightsStore((state) => state.session); const sessions = useInsightsStore((state) => state.sessions); const status = useInsightsStore((state) => state.status); @@ -96,6 +96,11 @@ export function Insights({ projectId }: InsightsProps) { const currentTool = useInsightsStore((state) => state.currentTool); const isLoadingSessions = useInsightsStore((state) => state.isLoadingSessions); + // Create markdown components with translated accessibility text + const markdownComponents = useMemo(() => ({ + a: createSafeLink(t('accessibility.opensInNewWindow')), + }), [t]); + const [inputValue, setInputValue] = useState(''); const [creatingTask, setCreatingTask] = useState(null); const [taskCreated, setTaskCreated] = useState>(new Set()); @@ -295,6 +300,7 @@ export function Insights({ projectId }: InsightsProps) { handleCreateTask(message)} isCreatingTask={creatingTask === 
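For reference, the memoized markdownComponents map above is passed to ReactMarkdown as before; with the factory in place an unsafe href such as javascript:alert(1) renders as plain text, while http(s) links get target="_blank", rel="noopener noreferrer" and the translated screen-reader hint. Sketch only (markdownSource is illustrative):

<ReactMarkdown remarkPlugins={[remarkGfm]} components={markdownComponents}>
  {markdownSource}
</ReactMarkdown>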
message.id} taskCreated={taskCreated.has(message.id)} @@ -387,6 +393,7 @@ export function Insights({ projectId }: InsightsProps) { interface MessageBubbleProps { message: InsightsChatMessage; + markdownComponents: Components; onCreateTask: () => void; isCreatingTask: boolean; taskCreated: boolean; @@ -394,6 +401,7 @@ interface MessageBubbleProps { function MessageBubble({ message, + markdownComponents, onCreateTask, isCreatingTask, taskCreated diff --git a/apps/frontend/src/renderer/components/KanbanBoard.tsx b/apps/frontend/src/renderer/components/KanbanBoard.tsx index de2ad394f4..4eb3c134ab 100644 --- a/apps/frontend/src/renderer/components/KanbanBoard.tsx +++ b/apps/frontend/src/renderer/components/KanbanBoard.tsx @@ -22,6 +22,7 @@ import { import { Plus, Inbox, Loader2, Eye, CheckCircle2, Archive, RefreshCw } from 'lucide-react'; import { ScrollArea } from './ui/scroll-area'; import { Button } from './ui/button'; +import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip'; import { TaskCard } from './TaskCard'; import { SortableTaskCard } from './SortableTaskCard'; import { TASK_STATUS_COLUMNS, TASK_STATUS_LABELS } from '../../shared/constants'; @@ -41,9 +42,13 @@ interface DroppableColumnProps { status: TaskStatus; tasks: Task[]; onTaskClick: (task: Task) => void; + onStatusChange: (taskId: string, newStatus: TaskStatus) => unknown; isOver: boolean; onAddClick?: () => void; onArchiveAll?: () => void; + archivedCount?: number; + showArchived?: boolean; + onToggleArchived?: () => void; } /** @@ -81,8 +86,12 @@ function droppableColumnPropsAreEqual( if (prevProps.status !== nextProps.status) return false; if (prevProps.isOver !== nextProps.isOver) return false; if (prevProps.onTaskClick !== nextProps.onTaskClick) return false; + if (prevProps.onStatusChange !== nextProps.onStatusChange) return false; if (prevProps.onAddClick !== nextProps.onAddClick) return false; if (prevProps.onArchiveAll !== nextProps.onArchiveAll) return false; + if (prevProps.archivedCount !== nextProps.archivedCount) return false; + if (prevProps.showArchived !== nextProps.showArchived) return false; + if (prevProps.onToggleArchived !== nextProps.onToggleArchived) return false; // Deep compare tasks const tasksEqual = tasksAreEquivalent(prevProps.tasks, nextProps.tasks); @@ -136,8 +145,8 @@ const getEmptyStateContent = (status: TaskStatus, t: (key: string) => string): { } }; -const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskClick, isOver, onAddClick, onArchiveAll }: DroppableColumnProps) { - const { t } = useTranslation('tasks'); +const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskClick, onStatusChange, isOver, onAddClick, onArchiveAll, archivedCount, showArchived, onToggleArchived }: DroppableColumnProps) { + const { t } = useTranslation(['tasks', 'common']); const { setNodeRef } = useDroppable({ id: status }); @@ -154,6 +163,15 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli return handlers; }, [tasks, onTaskClick]); + // Create stable onStatusChange handlers for each task + const onStatusChangeHandlers = useMemo(() => { + const handlers = new Map unknown>(); + tasks.forEach((task) => { + handlers.set(task.id, (newStatus: TaskStatus) => onStatusChange(task.id, newStatus)); + }); + return handlers; + }, [tasks, onStatusChange]); + // Memoize task card elements to prevent recreation on every render const taskCards = useMemo(() => { if (tasks.length === 0) return null; @@ -162,9 +180,10 @@ const DroppableColumn = 
memo(function DroppableColumn({ status, tasks, onTaskCli key={task.id} task={task} onClick={onClickHandlers.get(task.id)!} + onStatusChange={onStatusChangeHandlers.get(task.id)} /> )); - }, [tasks, onClickHandlers]); + }, [tasks, onClickHandlers, onStatusChangeHandlers]); const getColumnBorderColor = (): string => { switch (status) { @@ -199,7 +218,7 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli

- {TASK_STATUS_LABELS[status]} + {t(TASK_STATUS_LABELS[status])}

{tasks.length} @@ -212,21 +231,48 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli size="icon" className="h-7 w-7 hover:bg-primary/10 hover:text-primary transition-colors" onClick={onAddClick} + aria-label={t('kanban.addTaskAriaLabel')} > )} - {status === 'done' && onArchiveAll && tasks.length > 0 && ( + {status === 'done' && onArchiveAll && tasks.length > 0 && !showArchived && ( )} + {status === 'done' && archivedCount !== undefined && archivedCount > 0 && onToggleArchived && ( + + + + + + {showArchived ? t('common:projectTab.hideArchived') : t('common:projectTab.showArchived')} + + + )}
@@ -277,11 +323,17 @@ const DroppableColumn = memo(function DroppableColumn({ status, tasks, onTaskCli ); }, droppableColumnPropsAreEqual); -export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick }: KanbanBoardProps) { +export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick, onRefresh, isRefreshing }: KanbanBoardProps) { const { t } = useTranslation('tasks'); const [activeTask, setActiveTask] = useState(null); const [overColumnId, setOverColumnId] = useState(null); - const { showArchived } = useViewState(); + const { showArchived, toggleShowArchived } = useViewState(); + + // Calculate archived count for Done column button + const archivedCount = useMemo(() => + tasks.filter(t => t.metadata?.archivedAt).length, + [tasks] + ); // Filter tasks based on archive status const filteredTasks = useMemo(() => { @@ -412,6 +464,21 @@ export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick }: KanbanBoardP return (
+ {/* Kanban header with refresh button */} + {onRefresh && ( +
+ +
+ )} {/* Kanban columns */} ))}
diff --git a/apps/frontend/src/renderer/components/ProjectTabBar.tsx b/apps/frontend/src/renderer/components/ProjectTabBar.tsx index ef6e34d25d..7836b8c77a 100644 --- a/apps/frontend/src/renderer/components/ProjectTabBar.tsx +++ b/apps/frontend/src/renderer/components/ProjectTabBar.tsx @@ -1,4 +1,5 @@ import { useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; import { Plus } from 'lucide-react'; import { cn } from '../lib/utils'; import { Button } from './ui/button'; @@ -15,9 +16,6 @@ interface ProjectTabBarProps { className?: string; // Control props for active tab onSettingsClick?: () => void; - showArchived?: boolean; - archivedCount?: number; - onToggleArchived?: () => void; } export function ProjectTabBar({ @@ -27,11 +25,10 @@ export function ProjectTabBar({ onProjectClose, onAddProject, className, - onSettingsClick, - showArchived, - archivedCount, - onToggleArchived + onSettingsClick }: ProjectTabBarProps) { + const { t } = useTranslation('common'); + // Keyboard shortcuts for tab navigation useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { @@ -109,9 +106,6 @@ export function ProjectTabBar({ }} // Pass control props only for active tab onSettingsClick={isActiveTab ? onSettingsClick : undefined} - showArchived={isActiveTab ? showArchived : undefined} - archivedCount={isActiveTab ? archivedCount : undefined} - onToggleArchived={isActiveTab ? onToggleArchived : undefined} /> ); })} @@ -124,7 +118,7 @@ export function ProjectTabBar({ size="icon" className="h-8 w-8" onClick={onAddProject} - title="Add Project" + aria-label={t('projectTab.addProjectAriaLabel')} > diff --git a/apps/frontend/src/renderer/components/RateLimitModal.tsx b/apps/frontend/src/renderer/components/RateLimitModal.tsx index a364016595..b19c842afd 100644 --- a/apps/frontend/src/renderer/components/RateLimitModal.tsx +++ b/apps/frontend/src/renderer/components/RateLimitModal.tsx @@ -375,9 +375,11 @@ export function RateLimitModal() { size="sm" className="gap-2" onClick={handleUpgrade} + aria-label={t('accessibility.upgradeSubscriptionAriaLabel')} > - +
diff --git a/apps/frontend/src/renderer/components/Sidebar.tsx b/apps/frontend/src/renderer/components/Sidebar.tsx index ce40585225..5114b54960 100644 --- a/apps/frontend/src/renderer/components/Sidebar.tsx +++ b/apps/frontend/src/renderer/components/Sidebar.tsx @@ -276,6 +276,7 @@ export function Sidebar({ key={item.id} onClick={() => handleNavClick(item.id)} disabled={!selectedProjectId} + aria-keyshortcuts={item.shortcut} className={cn( 'flex w-full items-center gap-3 rounded-lg px-3 py-2.5 text-sm transition-all duration-200', 'hover:bg-accent hover:text-accent-foreground', @@ -354,6 +355,7 @@ export function Sidebar({ variant="ghost" size="icon" onClick={() => window.open('https://github.com/AndyMik90/Auto-Claude/issues', '_blank')} + aria-label={t('tooltips.help')} > diff --git a/apps/frontend/src/renderer/components/SortableProjectTab.tsx b/apps/frontend/src/renderer/components/SortableProjectTab.tsx index dc53e991ad..d57cf1292c 100644 --- a/apps/frontend/src/renderer/components/SortableProjectTab.tsx +++ b/apps/frontend/src/renderer/components/SortableProjectTab.tsx @@ -1,7 +1,7 @@ import { useSortable } from '@dnd-kit/sortable'; import { CSS } from '@dnd-kit/utilities'; import { useTranslation } from 'react-i18next'; -import { Settings2, Archive } from 'lucide-react'; +import { Settings2 } from 'lucide-react'; import { cn } from '../lib/utils'; import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip'; import type { Project } from '../../shared/types'; @@ -15,9 +15,6 @@ interface SortableProjectTabProps { onClose: (e: React.MouseEvent) => void; // Optional control props for active tab onSettingsClick?: () => void; - showArchived?: boolean; - archivedCount?: number; - onToggleArchived?: () => void; } // Detect if running on macOS for keyboard shortcut display @@ -31,10 +28,7 @@ export function SortableProjectTab({ tabIndex, onSelect, onClose, - onSettingsClick, - showArchived, - archivedCount, - onToggleArchived + onSettingsClick }: SortableProjectTabProps) { const { t } = useTranslation('common'); // Build tooltip with keyboard shortcut hint (only for tabs 1-9) @@ -148,42 +142,6 @@ export function SortableProjectTab({ )} - - {/* Archive toggle button with badge - responsive sizing */} - {onToggleArchived && ( - - - - - - {showArchived ? 
t('projectTab.hideArchived') : t('projectTab.showArchived')} - - - )} )} @@ -202,7 +160,7 @@ export function SortableProjectTab({ isActive && 'opacity-100' )} onClick={onClose} - aria-label={t('projectTab.closeTab')} + aria-label={t('projectTab.closeTabAriaLabel')} > diff --git a/apps/frontend/src/renderer/components/SortableTaskCard.tsx b/apps/frontend/src/renderer/components/SortableTaskCard.tsx index a23bac9224..b830a1817d 100644 --- a/apps/frontend/src/renderer/components/SortableTaskCard.tsx +++ b/apps/frontend/src/renderer/components/SortableTaskCard.tsx @@ -3,11 +3,12 @@ import { useSortable } from '@dnd-kit/sortable'; import { CSS } from '@dnd-kit/utilities'; import { TaskCard } from './TaskCard'; import { cn } from '../lib/utils'; -import type { Task } from '../../shared/types'; +import type { Task, TaskStatus } from '../../shared/types'; interface SortableTaskCardProps { task: Task; onClick: () => void; + onStatusChange?: (newStatus: TaskStatus) => unknown; } // Custom comparator - only re-render when task or onClick actually changed @@ -19,11 +20,12 @@ function sortableTaskCardPropsAreEqual( // for the task object and onClick handler return ( prevProps.task === nextProps.task && - prevProps.onClick === nextProps.onClick + prevProps.onClick === nextProps.onClick && + prevProps.onStatusChange === nextProps.onStatusChange ); } -export const SortableTaskCard = memo(function SortableTaskCard({ task, onClick }: SortableTaskCardProps) { +export const SortableTaskCard = memo(function SortableTaskCard({ task, onClick, onStatusChange }: SortableTaskCardProps) { const { attributes, listeners, @@ -58,7 +60,7 @@ export const SortableTaskCard = memo(function SortableTaskCard({ task, onClick } {...attributes} {...listeners} > - + ); }, sortableTaskCardPropsAreEqual); diff --git a/apps/frontend/src/renderer/components/SortableTerminalWrapper.tsx b/apps/frontend/src/renderer/components/SortableTerminalWrapper.tsx new file mode 100644 index 0000000000..ad6f421da7 --- /dev/null +++ b/apps/frontend/src/renderer/components/SortableTerminalWrapper.tsx @@ -0,0 +1,83 @@ +import React from 'react'; +import { useSortable } from '@dnd-kit/sortable'; +import { CSS } from '@dnd-kit/utilities'; +import type { Task } from '../../shared/types'; +import { Terminal } from './Terminal'; +import { cn } from '../lib/utils'; + +interface SortableTerminalWrapperProps { + id: string; + cwd?: string; + projectPath?: string; + isActive: boolean; + onClose: () => void; + onActivate: () => void; + tasks: Task[]; + onNewTaskClick?: () => void; + terminalCount: number; + isExpanded?: boolean; + onToggleExpand?: () => void; +} + +export function SortableTerminalWrapper({ + id, + cwd, + projectPath, + isActive, + onClose, + onActivate, + tasks, + onNewTaskClick, + terminalCount, + isExpanded, + onToggleExpand, +}: SortableTerminalWrapperProps) { + const { + attributes, + listeners, + setNodeRef, + transform, + transition, + isDragging, + } = useSortable({ + id, + data: { + type: 'terminal-panel', + terminalId: id, + }, + }); + + const style = { + transform: CSS.Transform.toString(transform), + transition, + zIndex: isDragging ? 50 : undefined, + }; + + return ( +
+ +
+ ); +} diff --git a/apps/frontend/src/renderer/components/TaskCard.tsx b/apps/frontend/src/renderer/components/TaskCard.tsx index 87ee9751cb..f07db15b13 100644 --- a/apps/frontend/src/renderer/components/TaskCard.tsx +++ b/apps/frontend/src/renderer/components/TaskCard.tsx @@ -1,9 +1,17 @@ import { useState, useEffect, useRef, useCallback, memo, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { Play, Square, Clock, Zap, Target, Shield, Gauge, Palette, FileCode, Bug, Wrench, Loader2, AlertTriangle, RotateCcw, Archive } from 'lucide-react'; +import { Play, Square, Clock, Zap, Target, Shield, Gauge, Palette, FileCode, Bug, Wrench, Loader2, AlertTriangle, RotateCcw, Archive, MoreVertical } from 'lucide-react'; import { Card, CardContent } from './ui/card'; import { Badge } from './ui/badge'; import { Button } from './ui/button'; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuLabel, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from './ui/dropdown-menu'; import { cn, formatRelativeTime, sanitizeMarkdownForDisplay } from '../lib/utils'; import { PhaseProgressIndicator } from './PhaseProgressIndicator'; import { @@ -16,10 +24,12 @@ import { TASK_PRIORITY_COLORS, TASK_PRIORITY_LABELS, EXECUTION_PHASE_LABELS, - EXECUTION_PHASE_BADGE_COLORS + EXECUTION_PHASE_BADGE_COLORS, + TASK_STATUS_COLUMNS, + TASK_STATUS_LABELS } from '../../shared/constants'; import { startTask, stopTask, checkTaskRunning, recoverStuckTask, isIncompleteHumanReview, archiveTasks } from '../stores/task-store'; -import type { Task, TaskCategory, ReviewReason } from '../../shared/types'; +import type { Task, TaskCategory, ReviewReason, TaskStatus } from '../../shared/types'; // Category icon mapping const CategoryIcon: Record = { @@ -37,6 +47,7 @@ const CategoryIcon: Record = { interface TaskCardProps { task: Task; onClick: () => void; + onStatusChange?: (newStatus: TaskStatus) => unknown; } // Custom comparator for React.memo - only re-render when relevant task data changes @@ -45,7 +56,7 @@ function taskCardPropsAreEqual(prevProps: TaskCardProps, nextProps: TaskCardProp const nextTask = nextProps.task; // Fast path: same reference - if (prevTask === nextTask && prevProps.onClick === nextProps.onClick) { + if (prevTask === nextTask && prevProps.onClick === nextProps.onClick && prevProps.onStatusChange === nextProps.onStatusChange) { return true; } @@ -83,7 +94,7 @@ function taskCardPropsAreEqual(prevProps: TaskCardProps, nextProps: TaskCardProp return isEqual; } -export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) { +export const TaskCard = memo(function TaskCard({ task, onClick, onStatusChange }: TaskCardProps) { const { t } = useTranslation('tasks'); const [isStuck, setIsStuck] = useState(false); const [isRecovering, setIsRecovering] = useState(false); @@ -100,8 +111,9 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) const isIncomplete = isIncompleteHumanReview(task); // Memoize expensive computations to avoid running on every render + // Truncate description for card display - full description shown in modal const sanitizedDescription = useMemo( - () => task.description ? sanitizeMarkdownForDisplay(task.description, 150) : null, + () => task.description ? 
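The new SortableTerminalWrapper introduced above is a thin useSortable shell around Terminal. A hedged sketch of mounting a grid of terminals inside dnd-kit's sortable context, assuming DndContext comes from '@dnd-kit/core' and SortableContext/rectSortingStrategy from '@dnd-kit/sortable'; ids, handlers, and surrounding state are illustrative:

<DndContext onDragEnd={handleTerminalReorder}>
  <SortableContext items={terminals.map((t) => t.id)} strategy={rectSortingStrategy}>
    {terminals.map((term) => (
      <SortableTerminalWrapper
        key={term.id}
        id={term.id}
        projectPath={projectPath}
        isActive={term.id === activeTerminalId}
        onClose={() => closeTerminal(term.id)}
        onActivate={() => setActiveTerminal(term.id)}
        tasks={tasks}
        terminalCount={terminals.length}
      />
    ))}
  </SortableContext>
</DndContext>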
sanitizeMarkdownForDisplay(task.description, 120) : null, [task.description] ); @@ -111,12 +123,40 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) [task.updatedAt] ); + // Memoize status menu items to avoid recreating on every render + const statusMenuItems = useMemo(() => { + if (!onStatusChange) return null; + return TASK_STATUS_COLUMNS.filter(status => status !== task.status).map((status) => ( + onStatusChange(status)} + > + {t(TASK_STATUS_LABELS[status])} + + )); + }, [task.status, onStatusChange, t]); + // Memoized stuck check function to avoid recreating on every render const performStuckCheck = useCallback(() => { + // IMPORTANT: If the execution phase is 'complete' or 'failed', the task is NOT stuck. + // It means the process has finished and status update is pending. + // This prevents false-positive "stuck" indicators when the process exits normally. + const currentPhase = task.executionProgress?.phase; + if (currentPhase === 'complete' || currentPhase === 'failed') { + setIsStuck(false); + return; + } + // Use requestIdleCallback for non-blocking check when available const doCheck = () => { checkTaskRunning(task.id).then((actuallyRunning) => { - setIsStuck(!actuallyRunning); + // Double-check the phase again in case it changed while waiting + const latestPhase = task.executionProgress?.phase; + if (latestPhase === 'complete' || latestPhase === 'failed') { + setIsStuck(false); + } else { + setIsStuck(!actuallyRunning); + } }); }; @@ -125,7 +165,7 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) } else { doCheck(); } - }, [task.id]); + }, [task.id, task.executionProgress?.phase]); // Check if task is stuck (status says in_progress but no actual process) // Add a longer grace period to avoid false positives during process spawn @@ -268,15 +308,24 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) onClick={onClick} > - {/* Header - improved visual hierarchy */} -
-

- {task.title} -

-
+ {/* Title - full width, no wrapper */} +

+ {task.title} +

+ + {/* Description - sanitized to handle markdown content (memoized) */} + {sanitizedDescription && ( +

+ {sanitizedDescription} +

+ )} + + {/* Metadata badges */} + {(task.metadata || isStuck || isIncomplete || hasActiveExecution || reviewReasonInfo) && ( +
{/* Stuck indicator - highest priority */} {isStuck && ( )} -
-
- - {/* Description - sanitized to handle markdown content (memoized) */} - {sanitizedDescription && ( -

- {sanitizedDescription} -

- )} - - {/* Metadata badges */} - {task.metadata && ( -
{/* Category badge with icon */} - {task.metadata.category && ( + {task.metadata?.category && ( )} {/* Impact badge - high visibility for important tasks */} - {task.metadata.impact && (task.metadata.impact === 'high' || task.metadata.impact === 'critical') && ( + {task.metadata?.impact && (task.metadata.impact === 'high' || task.metadata.impact === 'critical') && ( )} {/* Complexity badge */} - {task.metadata.complexity && ( + {task.metadata?.complexity && ( )} {/* Priority badge - only show urgent/high */} - {task.metadata.priority && (task.metadata.priority === 'urgent' || task.metadata.priority === 'high') && ( + {task.metadata?.priority && (task.metadata.priority === 'urgent' || task.metadata.priority === 'high') && ( )} {/* Security severity - always show */} - {task.metadata.securitySeverity && ( + {task.metadata?.securitySeverity && ( - {task.metadata.securitySeverity} severity + {task.metadata.securitySeverity} {t('metadata.severity')} )}
@@ -424,68 +460,92 @@ export const TaskCard = memo(function TaskCard({ task, onClick }: TaskCardProps) {relativeTime}
- {/* Action buttons */} - {isStuck ? ( - - ) : isIncomplete ? ( - - ) : task.status === 'done' && !task.metadata?.archivedAt ? ( - - ) : (task.status === 'backlog' || task.status === 'in_progress') && ( - - )} +
+ {/* Action buttons */} + {isStuck ? ( + + ) : isIncomplete ? ( + + ) : task.status === 'done' && !task.metadata?.archivedAt ? ( + + ) : (task.status === 'backlog' || task.status === 'in_progress') && ( + + )} + + {/* Move to menu for keyboard accessibility */} + {statusMenuItems && ( + + + + + e.stopPropagation()}> + {t('actions.moveTo')} + + {statusMenuItems} + + + )} +
diff --git a/apps/frontend/src/renderer/components/TaskCreationWizard.tsx b/apps/frontend/src/renderer/components/TaskCreationWizard.tsx index be45fd17ed..7f67f3e2b3 100644 --- a/apps/frontend/src/renderer/components/TaskCreationWizard.tsx +++ b/apps/frontend/src/renderer/components/TaskCreationWizard.tsx @@ -1,4 +1,5 @@ import { useState, useEffect, useCallback, useRef, useMemo, type ClipboardEvent, type DragEvent } from 'react'; +import { useTranslation } from 'react-i18next'; import { Loader2, ChevronDown, ChevronUp, Image as ImageIcon, X, RotateCcw, FolderTree, GitBranch } from 'lucide-react'; import { Dialog, @@ -59,6 +60,7 @@ export function TaskCreationWizard({ open, onOpenChange }: TaskCreationWizardProps) { + const { t } = useTranslation('tasks'); // Get selected agent profile from settings const { settings } = useSettingsStore(); const selectedProfile = DEFAULT_AGENT_PROFILES.find( @@ -80,6 +82,8 @@ export function TaskCreationWizard({ const [isLoadingBranches, setIsLoadingBranches] = useState(false); const [baseBranch, setBaseBranch] = useState(PROJECT_DEFAULT_BRANCH); const [projectDefaultBranch, setProjectDefaultBranch] = useState(''); + // Worktree isolation - default to true for safety + const [useWorktree, setUseWorktree] = useState(true); // Get project path from project store const projects = useProjectStore((state) => state.projects); @@ -622,17 +626,20 @@ export function TaskCreationWizard({ if (impact) metadata.impact = impact; if (model) metadata.model = model; if (thinkingLevel) metadata.thinkingLevel = thinkingLevel; - // Auto profile - per-phase configuration - if (profileId === 'auto') { + // All profiles now support per-phase configuration + // isAutoProfile indicates task uses phase-specific models/thinking + if (phaseModels && phaseThinking) { metadata.isAutoProfile = true; - if (phaseModels) metadata.phaseModels = phaseModels; - if (phaseThinking) metadata.phaseThinking = phaseThinking; + metadata.phaseModels = phaseModels; + metadata.phaseThinking = phaseThinking; } if (images.length > 0) metadata.attachedImages = images; if (allReferencedFiles.length > 0) metadata.referencedFiles = allReferencedFiles; if (requireReviewBeforeCoding) metadata.requireReviewBeforeCoding = true; // Only include baseBranch if it's not the project default placeholder if (baseBranch && baseBranch !== PROJECT_DEFAULT_BRANCH) metadata.baseBranch = baseBranch; + // Pass worktree preference - false means use --direct mode + if (!useWorktree) metadata.useWorktree = false; // Title is optional - if empty, it will be auto-generated by the backend const task = await createTask(projectId, title.trim(), description.trim(), metadata); @@ -669,6 +676,7 @@ export function TaskCreationWizard({ setReferencedFiles([]); setRequireReviewBeforeCoding(false); setBaseBranch(PROJECT_DEFAULT_BRANCH); + setUseWorktree(true); setError(null); setShowAdvanced(false); setShowFileExplorer(false); @@ -796,6 +804,8 @@ export function TaskCreationWizard({ onDrop={handleTextareaDrop} rows={5} disabled={isCreating} + aria-required="true" + aria-describedby="description-help" className={cn( "resize-y min-h-[120px] max-h-[400px] relative bg-transparent", // Visual feedback when dragging over textarea @@ -814,7 +824,7 @@ export function TaskCreationWizard({ /> )} -
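To make the new flag concrete: with the isolation checkbox unticked, the wizard above ends up calling the task-store createTask helper roughly as below (title and description are illustrative); when the box stays ticked, useWorktree is simply omitted and the backend keeps its default isolated-worktree behaviour:

// Sketch only - other metadata fields depend on the rest of the form state.
await createTask(projectId, 'Fix login redirect', 'Users land on /404 after OAuth.', {
  useWorktree: false, // run in --direct mode, i.e. no isolated git worktree
});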

+

Files and images can be copy/pasted or dragged & dropped into the description.

@@ -851,6 +861,7 @@ export function TaskCreationWizard({ e.stopPropagation(); setImages(prev => prev.filter(img => img.id !== image.id)); }} + aria-label={t('images.removeImageAriaLabel', { filename: image.filename })} > @@ -914,6 +925,8 @@ export function TaskCreationWizard({ 'w-full justify-between py-2 px-3 rounded-md hover:bg-muted/50' )} disabled={isCreating} + aria-expanded={showAdvanced} + aria-controls="advanced-options-section" > Classification (optional) {showAdvanced ? ( @@ -925,7 +938,7 @@ export function TaskCreationWizard({ {/* Advanced Options */} {showAdvanced && ( -
+
{/* Category */}
@@ -1057,6 +1070,8 @@ export function TaskCreationWizard({ 'w-full justify-between py-2 px-3 rounded-md hover:bg-muted/50' )} disabled={isCreating} + aria-expanded={showGitOptions} + aria-controls="git-options-section" > @@ -1076,7 +1091,7 @@ export function TaskCreationWizard({ {/* Git Options */} {showGitOptions && ( -
+
+ + {/* Workspace Isolation Toggle */} +
+ setUseWorktree(checked === true)} + disabled={isCreating} + /> +
+ +

+ {t('wizard.gitOptions.useWorktreeDescription')} +

+
+
)} {/* Error */} {error && ( -
+
{error}
diff --git a/apps/frontend/src/renderer/components/TaskEditDialog.tsx b/apps/frontend/src/renderer/components/TaskEditDialog.tsx index d87ae977f6..9f460975fc 100644 --- a/apps/frontend/src/renderer/components/TaskEditDialog.tsx +++ b/apps/frontend/src/renderer/components/TaskEditDialog.tsx @@ -25,6 +25,7 @@ * ``` */ import { useState, useEffect, useCallback, useRef, type ClipboardEvent, type DragEvent } from 'react'; +import { useTranslation } from 'react-i18next'; import { Loader2, Image as ImageIcon, ChevronDown, ChevronUp, X } from 'lucide-react'; import { Dialog, @@ -87,6 +88,7 @@ interface TaskEditDialogProps { } export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDialogProps) { + const { t } = useTranslation('tasks'); // Get selected agent profile from settings for defaults const { settings } = useSettingsStore(); const selectedProfile = DEFAULT_AGENT_PROFILES.find( @@ -421,14 +423,12 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi if (impact) metadataUpdates.impact = impact; if (model) metadataUpdates.model = model as ModelType; if (thinkingLevel) metadataUpdates.thinkingLevel = thinkingLevel as ThinkingLevel; - // Auto profile - per-phase configuration - if (profileId === 'auto') { + // All profiles now support per-phase configuration + // isAutoProfile indicates task uses phase-specific models/thinking + if (phaseModels && phaseThinking) { metadataUpdates.isAutoProfile = true; - if (phaseModels) metadataUpdates.phaseModels = phaseModels; - if (phaseThinking) metadataUpdates.phaseThinking = phaseThinking; - } else { - // Clear auto profile fields if switching away from auto - metadataUpdates.isAutoProfile = false; + metadataUpdates.phaseModels = phaseModels; + metadataUpdates.phaseThinking = phaseThinking; } if (images.length > 0) metadataUpdates.attachedImages = images; metadataUpdates.requireReviewBeforeCoding = requireReviewBeforeCoding; @@ -487,12 +487,14 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi onDrop={handleTextareaDrop} rows={5} disabled={isSaving} + aria-required="true" + aria-describedby="edit-description-help" className={cn( isDragOverTextarea && !isSaving && "border-primary bg-primary/5 ring-2 ring-primary/20" )} /> -

- Tip: Paste screenshots directly with {navigator.platform.includes('Mac') ? 'โŒ˜V' : 'Ctrl+V'} to add reference images. +

+ {t('images.pasteHint', { shortcut: navigator.platform.includes('Mac') ? 'โŒ˜V' : 'Ctrl+V' })}

@@ -549,6 +551,8 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi 'w-full justify-between py-2 px-3 rounded-md hover:bg-muted/50' )} disabled={isSaving} + aria-expanded={showAdvanced} + aria-controls="edit-advanced-options" > Classification (optional) {showAdvanced ? ( @@ -560,7 +564,7 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi {/* Advanced Options */} {showAdvanced && ( -
+
{/* Category */}
@@ -670,6 +674,8 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi 'w-full justify-between py-2 px-3 rounded-md hover:bg-muted/50' )} disabled={isSaving} + aria-expanded={showImages} + aria-controls="edit-images-section" > @@ -689,7 +695,7 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi {/* Image Upload Section */} {showImages && ( -
+

Attach screenshots, mockups, or diagrams to provide visual context for the AI.

@@ -725,7 +731,7 @@ export function TaskEditDialog({ task, open, onOpenChange, onSaved }: TaskEditDi {/* Error */} {error && ( -
+
{error}
diff --git a/apps/frontend/src/renderer/components/Terminal.tsx b/apps/frontend/src/renderer/components/Terminal.tsx index f4ecb96280..50d0d10507 100644 --- a/apps/frontend/src/renderer/components/Terminal.tsx +++ b/apps/frontend/src/renderer/components/Terminal.tsx @@ -1,16 +1,24 @@ -import { useEffect, useRef, useCallback } from 'react'; -import { useDroppable } from '@dnd-kit/core'; +import { useEffect, useRef, useCallback, useState, useMemo } from 'react'; +import { useDroppable, useDndContext } from '@dnd-kit/core'; import '@xterm/xterm/css/xterm.css'; import { FileDown } from 'lucide-react'; import { cn } from '../lib/utils'; import { useTerminalStore } from '../stores/terminal-store'; +import { useSettingsStore } from '../stores/settings-store'; +import { useToast } from '../hooks/use-toast'; import type { TerminalProps } from './terminal/types'; +import type { TerminalWorktreeConfig } from '../../shared/types'; import { TerminalHeader } from './terminal/TerminalHeader'; +import { CreateWorktreeDialog } from './terminal/CreateWorktreeDialog'; import { useXterm } from './terminal/useXterm'; import { usePtyProcess } from './terminal/usePtyProcess'; import { useTerminalEvents } from './terminal/useTerminalEvents'; import { useAutoNaming } from './terminal/useAutoNaming'; +// Minimum dimensions to prevent PTY creation with invalid sizes +const MIN_COLS = 10; +const MIN_ROWS = 3; + export function Terminal({ id, cwd, @@ -20,15 +28,36 @@ export function Terminal({ onActivate, tasks = [], onNewTaskClick, - terminalCount = 1 + terminalCount = 1, + dragHandleListeners, + isDragging, + isExpanded, + onToggleExpand, }: TerminalProps) { const isMountedRef = useRef(true); const isCreatedRef = useRef(false); + // Track deliberate terminal recreation (e.g., worktree switching) + // This prevents exit handlers from triggering auto-removal during controlled recreation + const isRecreatingRef = useRef(false); + + // Worktree dialog state + const [showWorktreeDialog, setShowWorktreeDialog] = useState(false); + // Terminal store const terminal = useTerminalStore((state) => state.terminals.find((t) => t.id === id)); const setClaudeMode = useTerminalStore((state) => state.setClaudeMode); const updateTerminal = useTerminalStore((state) => state.updateTerminal); const setAssociatedTask = useTerminalStore((state) => state.setAssociatedTask); + const setWorktreeConfig = useTerminalStore((state) => state.setWorktreeConfig); + + // Use cwd from store if available (for worktree), otherwise use prop + const effectiveCwd = terminal?.cwd || cwd; + + // Settings store for IDE preferences + const { settings } = useSettingsStore(); + + // Toast for user feedback + const { toast } = useToast(); const associatedTask = terminal?.associatedTaskId ? 
tasks.find((t) => t.id === terminal.associatedTaskId) @@ -40,12 +69,29 @@ export function Terminal({ data: { type: 'terminal', terminalId: id } }); + // Check if a terminal is being dragged (vs a file) + const { active } = useDndContext(); + const isDraggingTerminal = active?.data.current?.type === 'terminal-panel'; + // Only show file drop overlay when dragging files, not terminals + const showFileDropOverlay = isOver && !isDraggingTerminal; + // Auto-naming functionality const { handleCommandEnter, cleanup: cleanupAutoNaming } = useAutoNaming({ terminalId: id, - cwd, + cwd: effectiveCwd, }); + // Track when xterm dimensions are ready for PTY creation + const [readyDimensions, setReadyDimensions] = useState<{ cols: number; rows: number } | null>(null); + + // Callback when xterm has measured valid dimensions + const handleDimensionsReady = useCallback((cols: number, rows: number) => { + // Only set dimensions if they're valid (above minimum thresholds) + if (cols >= MIN_COLS && rows >= MIN_ROWS) { + setReadyDimensions({ cols, rows }); + } + }, []); + // Initialize xterm with command tracking const { terminalRef, @@ -64,15 +110,34 @@ export function Terminal({ window.electronAPI.resizeTerminal(id, cols, rows); } }, + onDimensionsReady: handleDimensionsReady, }); - // Create PTY process - usePtyProcess({ + // Use ready dimensions for PTY creation (wait until xterm has measured) + // This prevents creating PTY with default 80x24 when container is smaller + const ptyDimensions = useMemo(() => { + if (readyDimensions) { + return readyDimensions; + } + // Fallback to current dimensions if they're valid + if (cols >= MIN_COLS && rows >= MIN_ROWS) { + return { cols, rows }; + } + // Return null to prevent PTY creation until dimensions are ready + return null; + }, [readyDimensions, cols, rows]); + + // Create PTY process - only when we have valid dimensions + const { prepareForRecreate, resetForRecreate } = usePtyProcess({ terminalId: id, - cwd, + cwd: effectiveCwd, projectPath, - cols, - rows, + cols: ptyDimensions?.cols ?? 80, + rows: ptyDimensions?.rows ?? 
24, + // Only allow PTY creation when dimensions are ready + skipCreation: !ptyDimensions, + // Pass recreation ref to coordinate with deliberate terminal destruction/recreation + isRecreatingRef, onCreated: () => { isCreatedRef.current = true; }, @@ -84,6 +149,8 @@ export function Terminal({ // Handle terminal events useTerminalEvents({ terminalId: id, + // Pass recreation ref to skip auto-removal during deliberate terminal recreation + isRecreatingRef, onOutput: (data) => { write(data); }, @@ -100,6 +167,36 @@ export function Terminal({ } }, [isActive, focus]); + // Trigger deferred Claude resume when terminal becomes active + // This ensures Claude sessions are only resumed when the user actually views the terminal, + // preventing all terminals from resuming simultaneously on app startup (which can crash the app) + useEffect(() => { + if (isActive && terminal?.pendingClaudeResume) { + // Clear the pending flag and trigger the actual resume + useTerminalStore.getState().setPendingClaudeResume(id, false); + window.electronAPI.activateDeferredClaudeResume(id); + } + }, [isActive, id, terminal?.pendingClaudeResume]); + + // Handle keyboard shortcuts for this terminal + useEffect(() => { + const handleKeyDown = (e: KeyboardEvent) => { + // Only handle if this terminal is active + if (!isActive) return; + + // Cmd/Ctrl+W to close terminal + if ((e.ctrlKey || e.metaKey) && e.key === 'w') { + e.preventDefault(); + e.stopPropagation(); + onClose(); + } + }; + + // Use capture phase to get the event before xterm + window.addEventListener('keydown', handleKeyDown, true); + return () => window.removeEventListener('keydown', handleKeyDown, true); + }, [isActive, onClose]); + // Cleanup on unmount useEffect(() => { isMountedRef.current = true; @@ -119,8 +216,8 @@ export function Terminal({ const handleInvokeClaude = useCallback(() => { setClaudeMode(id, true); - window.electronAPI.invokeClaudeInTerminal(id, cwd); - }, [id, cwd, setClaudeMode]); + window.electronAPI.invokeClaudeInTerminal(id, effectiveCwd); + }, [id, effectiveCwd, setClaudeMode]); const handleClick = useCallback(() => { onActivate(); @@ -129,6 +226,8 @@ export function Terminal({ const handleTitleChange = useCallback((newTitle: string) => { updateTerminal(id, { title: newTitle }); + // Sync to main process so title persists across hot reloads + window.electronAPI.setTerminalTitle(id, newTitle); }, [id, updateTerminal]); const handleTaskSelect = useCallback((taskId: string) => { @@ -137,6 +236,8 @@ export function Terminal({ setAssociatedTask(id, taskId); updateTerminal(id, { title: selectedTask.title }); + // Sync to main process so title persists across hot reloads + window.electronAPI.setTerminalTitle(id, selectedTask.title); const contextMessage = `I'm working on: ${selectedTask.title} @@ -151,19 +252,115 @@ Please confirm you're ready by saying: I'm ready to work on ${selectedTask.title const handleClearTask = useCallback(() => { setAssociatedTask(id, undefined); updateTerminal(id, { title: 'Claude' }); + // Sync to main process so title persists across hot reloads + window.electronAPI.setTerminalTitle(id, 'Claude'); }, [id, setAssociatedTask, updateTerminal]); + // Worktree handlers + const handleCreateWorktree = useCallback(() => { + setShowWorktreeDialog(true); + }, []); + + const handleWorktreeCreated = useCallback(async (config: TerminalWorktreeConfig) => { + // IMPORTANT: Set isRecreatingRef BEFORE destruction to signal deliberate recreation + // This prevents exit handlers from triggering auto-removal during controlled 
recreation + isRecreatingRef.current = true; + + // Set isCreatingRef BEFORE updating the store to prevent race condition + // This prevents the PTY effect from running before destroyTerminal completes + prepareForRecreate(); + + // Update terminal store with worktree config + setWorktreeConfig(id, config); + // Sync to main process so worktree config persists across hot reloads + window.electronAPI.setTerminalWorktreeConfig(id, config); + + // Update terminal title and cwd to worktree path + updateTerminal(id, { title: config.name, cwd: config.worktreePath }); + // Sync to main process so title persists across hot reloads + window.electronAPI.setTerminalTitle(id, config.name); + + // Destroy current PTY - a new one will be created in the worktree directory + if (isCreatedRef.current) { + await window.electronAPI.destroyTerminal(id); + isCreatedRef.current = false; + } + + // Reset refs to allow recreation - effect will now trigger with new cwd + resetForRecreate(); + }, [id, setWorktreeConfig, updateTerminal, prepareForRecreate, resetForRecreate]); + + const handleSelectWorktree = useCallback(async (config: TerminalWorktreeConfig) => { + // IMPORTANT: Set isRecreatingRef BEFORE destruction to signal deliberate recreation + // This prevents exit handlers from triggering auto-removal during controlled recreation + isRecreatingRef.current = true; + + // Set isCreatingRef BEFORE updating the store to prevent race condition + prepareForRecreate(); + + // Same logic as handleWorktreeCreated - attach terminal to existing worktree + setWorktreeConfig(id, config); + // Sync to main process so worktree config persists across hot reloads + window.electronAPI.setTerminalWorktreeConfig(id, config); + updateTerminal(id, { title: config.name, cwd: config.worktreePath }); + // Sync to main process so title persists across hot reloads + window.electronAPI.setTerminalTitle(id, config.name); + + // Destroy current PTY - a new one will be created in the worktree directory + if (isCreatedRef.current) { + await window.electronAPI.destroyTerminal(id); + isCreatedRef.current = false; + } + + resetForRecreate(); + }, [id, setWorktreeConfig, updateTerminal, prepareForRecreate, resetForRecreate]); + + const handleOpenInIDE = useCallback(async () => { + const worktreePath = terminal?.worktreeConfig?.worktreePath; + if (!worktreePath) return; + + const preferredIDE = settings.preferredIDE || 'vscode'; + try { + await window.electronAPI.worktreeOpenInIDE( + worktreePath, + preferredIDE, + settings.customIDEPath + ); + } catch (err) { + console.error('Failed to open in IDE:', err); + toast({ + title: 'Failed to open IDE', + description: err instanceof Error ? err.message : 'Could not launch IDE', + variant: 'destructive', + }); + } + }, [terminal?.worktreeConfig?.worktreePath, settings.preferredIDE, settings.customIDEPath, toast]); + + // Get backlog tasks for worktree dialog + const backlogTasks = tasks.filter((t) => t.status === 'backlog'); + + // Determine border color based on Claude busy state + // Red (busy) = Claude is actively processing + // Green (idle) = Claude is ready for input + const isClaudeBusy = terminal?.isClaudeBusy; + const showClaudeBusyIndicator = terminal?.isClaudeMode && isClaudeBusy !== undefined; + return (
- {isOver && ( + {showFileDropOverlay && (
@@ -186,6 +383,14 @@ Please confirm you're ready by saying: I'm ready to work on ${selectedTask.title onClearTask={handleClearTask} onNewTaskClick={onNewTaskClick} terminalCount={terminalCount} + worktreeConfig={terminal?.worktreeConfig} + projectPath={projectPath} + onCreateWorktree={handleCreateWorktree} + onSelectWorktree={handleSelectWorktree} + onOpenInIDE={handleOpenInIDE} + dragHandleListeners={dragHandleListeners} + isExpanded={isExpanded} + onToggleExpand={onToggleExpand} />
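The Terminal changes above defer PTY creation until xterm has measured usable dimensions, so a PTY is never spawned at the default 80x24 while the container is still too small. A standalone sketch of that gating logic; the helper name and types are illustrative, not the project's hook API:

// Sketch of the dimension-gating idea: only hand the PTY a size once the
// renderer has measured something usable, otherwise defer creation entirely.
const MIN_COLS = 10;
const MIN_ROWS = 3;

interface PtySize {
  cols: number;
  rows: number;
}

function resolvePtyDimensions(
  measured: PtySize | null, // dimensions reported once xterm has measured, if any
  current: PtySize          // whatever xterm currently reports (may be defaults)
): PtySize | null {
  // Prefer the explicitly measured dimensions.
  if (measured && measured.cols >= MIN_COLS && measured.rows >= MIN_ROWS) {
    return measured;
  }
  // Fall back to the live dimensions when they are already valid.
  if (current.cols >= MIN_COLS && current.rows >= MIN_ROWS) {
    return current;
  }
  // Returning null means "skip PTY creation for now".
  return null;
}

// Example: a 4x2 container is rejected, a 120x30 measurement is accepted.
console.log(resolvePtyDimensions(null, { cols: 4, rows: 2 }));                      // null
console.log(resolvePtyDimensions({ cols: 120, rows: 30 }, { cols: 4, rows: 2 }));   // { cols: 120, rows: 30 }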
+ + {/* Worktree creation dialog */} + {projectPath && ( + + )}
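Both worktree handlers above rely on a strict ordering: flag the recreation as deliberate, block the PTY-creation effect, persist the new config and title, destroy the old PTY, then re-enable creation. A simplified sketch of that sequence, with the electron bridge and refs reduced to placeholder shapes rather than the project's exact types:

// Illustrative sketch of the recreation ordering used by the worktree handlers.
interface WorktreeConfig {
  name: string;
  worktreePath: string;
}

interface TerminalBridge {
  setTerminalWorktreeConfig(id: string, config: WorktreeConfig): void;
  setTerminalTitle(id: string, title: string): void;
  destroyTerminal(id: string): Promise<void>;
}

interface RecreateRefs {
  isRecreating: { current: boolean }; // read by exit handlers to skip auto-removal
  isCreated: { current: boolean };    // whether a PTY currently exists
}

async function switchTerminalToWorktree(
  id: string,
  config: WorktreeConfig,
  bridge: TerminalBridge,
  refs: RecreateRefs,
  prepareForRecreate: () => void, // blocks the PTY-creation effect
  resetForRecreate: () => void,   // re-enables it once the old PTY is gone
  updateStore: (id: string, patch: { title: string; cwd: string }) => void
): Promise<void> {
  // 1. Mark the recreation as deliberate before anything is torn down.
  refs.isRecreating.current = true;
  // 2. Prevent the PTY effect from racing ahead of the destroy call.
  prepareForRecreate();
  // 3. Persist the new config and title in the renderer store and main process.
  bridge.setTerminalWorktreeConfig(id, config);
  updateStore(id, { title: config.name, cwd: config.worktreePath });
  bridge.setTerminalTitle(id, config.name);
  // 4. Tear down the old PTY; the next one starts in the worktree directory.
  if (refs.isCreated.current) {
    await bridge.destroyTerminal(id);
    refs.isCreated.current = false;
  }
  // 5. Allow the creation effect to run again with the new cwd.
  resetForRecreate();
}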
); } diff --git a/apps/frontend/src/renderer/components/TerminalGrid.tsx b/apps/frontend/src/renderer/components/TerminalGrid.tsx index 5ab6008ea8..2c2d43903d 100644 --- a/apps/frontend/src/renderer/components/TerminalGrid.tsx +++ b/apps/frontend/src/renderer/components/TerminalGrid.tsx @@ -10,11 +10,17 @@ import { type DragEndEvent, type DragStartEvent, PointerSensor, + KeyboardSensor, useSensor, useSensors } from '@dnd-kit/core'; -import { Plus, Sparkles, Grid2X2, FolderTree, File, Folder, History, ChevronDown, Loader2 } from 'lucide-react'; -import { Terminal } from './Terminal'; +import { + SortableContext, + rectSortingStrategy, + sortableKeyboardCoordinates, +} from '@dnd-kit/sortable'; +import { Plus, Sparkles, Grid2X2, FolderTree, File, Folder, History, ChevronDown, Loader2, TerminalSquare } from 'lucide-react'; +import { SortableTerminalWrapper } from './SortableTerminalWrapper'; import { Button } from './ui/button'; import { DropdownMenu, @@ -40,18 +46,21 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: const allTerminals = useTerminalStore((state) => state.terminals); // Filter terminals to show only those belonging to the current project // Also include legacy terminals without projectPath (created before this change) - const terminals = useMemo(() => - projectPath + // Exclude exited terminals as they are no longer functional + const terminals = useMemo(() => { + const filtered = projectPath ? allTerminals.filter(t => t.projectPath === projectPath || !t.projectPath) - : allTerminals, - [allTerminals, projectPath] - ); + : allTerminals; + // Exclude exited terminals from the visible list + return filtered.filter(t => t.status !== 'exited'); + }, [allTerminals, projectPath]); const activeTerminalId = useTerminalStore((state) => state.activeTerminalId); const addTerminal = useTerminalStore((state) => state.addTerminal); const removeTerminal = useTerminalStore((state) => state.removeTerminal); const setActiveTerminal = useTerminalStore((state) => state.setActiveTerminal); const canAddTerminal = useTerminalStore((state) => state.canAddTerminal); const setClaudeMode = useTerminalStore((state) => state.setClaudeMode); + const reorderTerminals = useTerminalStore((state) => state.reorderTerminals); // Get tasks from task store for task selection dropdown in terminals const tasks = useTaskStore((state) => state.tasks); @@ -65,6 +74,9 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: const [isLoadingDates, setIsLoadingDates] = useState(false); const [isRestoring, setIsRestoring] = useState(false); + // Expanded terminal state - when set, this terminal takes up the full grid space + const [expandedTerminalId, setExpandedTerminalId] = useState(null); + // Fetch available session dates when project changes useEffect(() => { if (!projectPath) { @@ -155,26 +167,37 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: } }, [projectPath, terminals, removeTerminal, addRestoredTerminal, isRestoring]); - // Setup drag sensors + // Setup drag sensors for both file and terminal drag operations const sensors = useSensors( useSensor(PointerSensor, { activationConstraint: { distance: 8, // 8px movement required before drag starts }, + }), + useSensor(KeyboardSensor, { + coordinateGetter: sortableKeyboardCoordinates, }) ); - // Track dragging state for overlay + // Track dragging state for file overlay const [activeDragData, setActiveDragData] = React.useState<{ path: string; name: string; isDirectory: 
boolean; } | null>(null); + // Track dragging terminal for overlay + const [draggingTerminalId, setDraggingTerminalId] = React.useState(null); + const draggingTerminal = terminals.find(t => t.id === draggingTerminalId); + const handleCloseTerminal = useCallback((id: string) => { window.electronAPI.destroyTerminal(id); removeTerminal(id); - }, [removeTerminal]); + // Clear expanded state if the closed terminal was expanded + if (expandedTerminalId === id) { + setExpandedTerminalId(null); + } + }, [removeTerminal, expandedTerminalId]); // Handle keyboard shortcut for new terminal (only when this view is active) useEffect(() => { @@ -184,7 +207,7 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: // Ctrl+T or Cmd+T for new terminal if ((e.ctrlKey || e.metaKey) && e.key === 't') { e.preventDefault(); - if (canAddTerminal()) { + if (canAddTerminal(projectPath)) { addTerminal(projectPath, projectPath); } } @@ -200,11 +223,16 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: }, [isActive, addTerminal, canAddTerminal, projectPath, activeTerminalId, handleCloseTerminal]); const handleAddTerminal = useCallback(() => { - if (canAddTerminal()) { + if (canAddTerminal(projectPath)) { addTerminal(projectPath, projectPath); } }, [addTerminal, canAddTerminal, projectPath]); + // Toggle terminal expand state + const handleToggleExpand = useCallback((terminalId: string) => { + setExpandedTerminalId(prev => prev === terminalId ? null : terminalId); + }, []); + const handleInvokeClaudeAll = useCallback(() => { terminals.forEach((terminal) => { if (terminal.status === 'running' && !terminal.isClaudeMode) { @@ -218,42 +246,63 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: const handleDragStart = useCallback((event: DragStartEvent) => { const data = event.active.data.current as { type: string; - path: string; - name: string; - isDirectory: boolean; + path?: string; + name?: string; + isDirectory?: boolean; + terminalId?: string; } | undefined; - if (data?.type === 'file') { + if (data?.type === 'file' && data.path && data.name !== undefined) { setActiveDragData({ path: data.path, name: data.name, - isDirectory: data.isDirectory + isDirectory: data.isDirectory ?? 
false }); + } else if (data?.type === 'terminal-panel') { + setDraggingTerminalId(event.active.id.toString()); } }, []); - // Handle drag end - insert file path into terminal + // Handle drag end - insert file path into terminal or reorder terminals const handleDragEnd = useCallback((event: DragEndEvent) => { const { active, over } = event; + const activeData = active.data.current as { type?: string; path?: string } | undefined; + // Clear drag states setActiveDragData(null); + setDraggingTerminalId(null); if (!over) return; - // Check if dropped on a terminal + // Handle terminal reordering + if (activeData?.type === 'terminal-panel') { + const activeId = active.id.toString(); + let overId = over.id.toString(); + + // Handle case where over is the file drop zone (terminal-xyz) instead of sortable item (xyz) + if (overId.startsWith('terminal-')) { + overId = overId.replace('terminal-', ''); + } + + if (activeId !== overId && terminals.some(t => t.id === overId)) { + reorderTerminals(activeId, overId); + } + return; + } + + // Handle file drop on terminal const overId = over.id.toString(); if (overId.startsWith('terminal-')) { const terminalId = overId.replace('terminal-', ''); - const data = active.data.current as { path?: string } | undefined; - if (data?.path) { + if (activeData?.path) { // Quote the path if it contains spaces - const quotedPath = data.path.includes(' ') ? `"${data.path}"` : data.path; + const quotedPath = activeData.path.includes(' ') ? `"${activeData.path}"` : activeData.path; // Insert the file path into the terminal with a trailing space window.electronAPI.sendTerminalInput(terminalId, quotedPath + ' '); } } - }, []); + }, [reorderTerminals]); // Calculate grid layout based on number of terminals const gridLayout = useMemo(() => { @@ -279,6 +328,9 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: return rows; }, [terminals, gridLayout]); + // Terminal IDs for SortableContext + const terminalIds = useMemo(() => terminals.map(t => t.id), [terminals]); + // Empty state if (terminals.length === 0) { return ( @@ -373,7 +425,7 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: size="sm" className="h-7 text-xs gap-1.5" onClick={handleAddTerminal} - disabled={!canAddTerminal()} + disabled={!canAddTerminal(projectPath)} > New Terminal @@ -403,41 +455,71 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: "flex-1 overflow-hidden p-2 transition-all duration-300 ease-out", fileExplorerOpen && "pr-0" )}> - - {terminalRows.map((row, rowIndex) => ( - - - - {row.map((terminal, colIndex) => ( - - -
- handleCloseTerminal(terminal.id)} - onActivate={() => setActiveTerminal(terminal.id)} - tasks={tasks} - onNewTaskClick={onNewTaskClick} - terminalCount={terminals.length} - /> -
-
- {colIndex < row.length - 1 && ( - - )} -
- ))} -
-
- {rowIndex < terminalRows.length - 1 && ( - - )} -
- ))} -
+ {expandedTerminalId ? ( + // Show only the expanded terminal + (() => { + const expandedTerminal = terminals.find(t => t.id === expandedTerminalId); + if (!expandedTerminal) return null; + return ( +
+ handleCloseTerminal(expandedTerminal.id)} + onActivate={() => setActiveTerminal(expandedTerminal.id)} + tasks={tasks} + onNewTaskClick={onNewTaskClick} + terminalCount={1} + isExpanded={true} + onToggleExpand={() => handleToggleExpand(expandedTerminal.id)} + /> +
+ ); + })() + ) : ( + // Show the normal grid layout + + + {terminalRows.map((row, rowIndex) => ( + + + + {row.map((terminal, colIndex) => ( + + +
+ handleCloseTerminal(terminal.id)} + onActivate={() => setActiveTerminal(terminal.id)} + tasks={tasks} + onNewTaskClick={onNewTaskClick} + terminalCount={terminals.length} + isExpanded={false} + onToggleExpand={() => handleToggleExpand(terminal.id)} + /> +
+
+ {colIndex < row.length - 1 && ( + + )} +
+ ))} +
+
+ {rowIndex < terminalRows.length - 1 && ( + + )} +
+ ))} +
+
+ )}
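handleDragEnd above serves two drag types: terminal panels reorder the grid, after normalizing a "terminal-"-prefixed drop id back to the sortable id, while file drags insert a quoted path into the target terminal. A trimmed-down sketch of that branching, using a reduced event shape instead of dnd-kit's real types:

// Simplified drag-end branching; real DragEndEvent objects carry more fields.
interface DragEndLike {
  activeId: string;
  activeData?: { type?: string; path?: string };
  overId: string | null;
}

function handleTerminalGridDrop(
  event: DragEndLike,
  terminalIds: string[],
  reorderTerminals: (activeId: string, overId: string) => void,
  sendTerminalInput: (terminalId: string, data: string) => void
): void {
  if (!event.overId) return;

  // Terminal panels reorder the grid.
  if (event.activeData?.type === 'terminal-panel') {
    // The drop target may be the file-drop zone ("terminal-xyz") rather than
    // the sortable item ("xyz"), so normalize the id first.
    const overId = event.overId.startsWith('terminal-')
      ? event.overId.slice('terminal-'.length)
      : event.overId;
    if (overId !== event.activeId && terminalIds.includes(overId)) {
      reorderTerminals(event.activeId, overId);
    }
    return;
  }

  // File drags insert a (quoted, if needed) path into the target terminal.
  if (event.overId.startsWith('terminal-') && event.activeData?.path) {
    const terminalId = event.overId.slice('terminal-'.length);
    const path = event.activeData.path;
    const quoted = path.includes(' ') ? `"${path}"` : path;
    sendTerminalInput(terminalId, quoted + ' ');
  }
}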
{/* File explorer panel (slides from right, pushes content) */} @@ -456,6 +538,12 @@ export function TerminalGrid({ projectPath, onNewTaskClick, isActive = false }: {activeDragData.name}
)} + {draggingTerminal && ( +
+ + {draggingTerminal.title || 'Terminal'} +
+ )}
diff --git a/apps/frontend/src/renderer/components/WelcomeScreen.tsx b/apps/frontend/src/renderer/components/WelcomeScreen.tsx index 64d05783ec..71a2863ba6 100644 --- a/apps/frontend/src/renderer/components/WelcomeScreen.tsx +++ b/apps/frontend/src/renderer/components/WelcomeScreen.tsx @@ -91,6 +91,7 @@ export function WelcomeScreen({ key={project.id} onClick={() => onSelectProject(project.id)} className="w-full flex items-center gap-3 rounded-lg px-3 py-3 text-left transition-colors hover:bg-accent/50 group" + aria-label={t('welcome:recentProjects.openProjectAriaLabel', { name: project.name })} >
diff --git a/apps/frontend/src/renderer/components/Worktrees.tsx b/apps/frontend/src/renderer/components/Worktrees.tsx index 70d1927f5d..ba7f51c7b5 100644 --- a/apps/frontend/src/renderer/components/Worktrees.tsx +++ b/apps/frontend/src/renderer/components/Worktrees.tsx @@ -6,13 +6,15 @@ import { Loader2, AlertCircle, FolderOpen, + FolderGit, GitMerge, FileCode, Plus, Minus, ChevronRight, Check, - X + X, + Terminal } from 'lucide-react'; import { Button } from './ui/button'; import { Badge } from './ui/badge'; @@ -38,7 +40,7 @@ import { } from './ui/alert-dialog'; import { useProjectStore } from '../stores/project-store'; import { useTaskStore } from '../stores/task-store'; -import type { WorktreeListItem, WorktreeMergeResult } from '../../shared/types'; +import type { WorktreeListItem, WorktreeMergeResult, TerminalWorktreeConfig } from '../../shared/types'; interface WorktreesProps { projectId: string; @@ -50,9 +52,14 @@ export function Worktrees({ projectId }: WorktreesProps) { const tasks = useTaskStore((state) => state.tasks); const [worktrees, setWorktrees] = useState([]); + const [terminalWorktrees, setTerminalWorktrees] = useState([]); const [isLoading, setIsLoading] = useState(false); const [error, setError] = useState(null); + // Terminal worktree delete state + const [terminalWorktreeToDelete, setTerminalWorktreeToDelete] = useState(null); + const [isDeletingTerminal, setIsDeletingTerminal] = useState(false); + // Merge dialog state const [showMergeDialog, setShowMergeDialog] = useState(false); const [selectedWorktree, setSelectedWorktree] = useState(null); @@ -64,26 +71,42 @@ export function Worktrees({ projectId }: WorktreesProps) { const [worktreeToDelete, setWorktreeToDelete] = useState(null); const [isDeleting, setIsDeleting] = useState(false); - // Load worktrees + // Load worktrees (both task and terminal worktrees) const loadWorktrees = useCallback(async () => { - if (!projectId) return; + if (!projectId || !selectedProject) return; setIsLoading(true); setError(null); try { - const result = await window.electronAPI.listWorktrees(projectId); - if (result.success && result.data) { - setWorktrees(result.data.worktrees); + // Fetch both task worktrees and terminal worktrees in parallel + const [taskResult, terminalResult] = await Promise.all([ + window.electronAPI.listWorktrees(projectId), + window.electronAPI.listTerminalWorktrees(selectedProject.path) + ]); + + console.log('[Worktrees] Task worktrees result:', taskResult); + console.log('[Worktrees] Terminal worktrees result:', terminalResult); + + if (taskResult.success && taskResult.data) { + setWorktrees(taskResult.data.worktrees); } else { - setError(result.error || 'Failed to load worktrees'); + setError(taskResult.error || 'Failed to load task worktrees'); + } + + if (terminalResult.success && terminalResult.data) { + console.log('[Worktrees] Setting terminal worktrees:', terminalResult.data); + setTerminalWorktrees(terminalResult.data); + } else { + console.warn('[Worktrees] Terminal worktrees fetch failed or empty:', terminalResult); } } catch (err) { + console.error('[Worktrees] Error loading worktrees:', err); setError(err instanceof Error ? 
err.message : 'Failed to load worktrees'); } finally { setIsLoading(false); } - }, [projectId]); + }, [projectId, selectedProject]); // Load on mount and when project changes useEffect(() => { @@ -171,6 +194,31 @@ export function Worktrees({ projectId }: WorktreesProps) { setShowDeleteConfirm(true); }; + // Handle terminal worktree delete + const handleDeleteTerminalWorktree = async () => { + if (!terminalWorktreeToDelete || !selectedProject) return; + + setIsDeletingTerminal(true); + try { + const result = await window.electronAPI.removeTerminalWorktree( + selectedProject.path, + terminalWorktreeToDelete.name, + terminalWorktreeToDelete.hasGitBranch // Delete the branch too if it was created + ); + if (result.success) { + // Refresh worktrees after successful delete + await loadWorktrees(); + setTerminalWorktreeToDelete(null); + } else { + setError(result.error || 'Failed to delete terminal worktree'); + } + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to delete terminal worktree'); + } finally { + setIsDeletingTerminal(false); + } + }; + if (!selectedProject) { return (
@@ -217,14 +265,14 @@ export function Worktrees({ projectId }: WorktreesProps) { )} {/* Loading state */} - {isLoading && worktrees.length === 0 && ( + {isLoading && worktrees.length === 0 && terminalWorktrees.length === 0 && (
)} {/* Empty state */} - {!isLoading && worktrees.length === 0 && ( + {!isLoading && worktrees.length === 0 && terminalWorktrees.length === 0 && (
@@ -232,102 +280,186 @@ export function Worktrees({ projectId }: WorktreesProps) {

No Worktrees

Worktrees are created automatically when Auto Claude builds features. - They provide isolated workspaces for each task. + You can also create terminal worktrees from the Agent Terminals tab.

)} - {/* Worktrees list */} - {worktrees.length > 0 && ( + {/* Main content area with scroll */} + {(worktrees.length > 0 || terminalWorktrees.length > 0) && ( -
- {worktrees.map((worktree) => { - const task = findTaskForWorktree(worktree.specName); - return ( - - -
-
- - - {worktree.branch} - - {task && ( - - {task.title} - +
+ {/* Task Worktrees Section */} + {worktrees.length > 0 && ( +
+

+ + Task Worktrees +

+ {worktrees.map((worktree) => { + const task = findTaskForWorktree(worktree.specName); + return ( + + +
+
+ + + {worktree.branch} + + {task && ( + + {task.title} + + )} +
+ + {worktree.specName} + +
+
+ + {/* Stats */} +
+
+ + {worktree.filesChanged} files changed +
+
+ + {worktree.commitCount} commits ahead +
+
+ + {worktree.additions} +
+
+ + {worktree.deletions} +
+
+ + {/* Branch info */} +
+ {worktree.baseBranch} + + {worktree.branch} +
+ + {/* Actions */} +
+ + + +
+
+
+ ); + })} +
+ )} + + {/* Terminal Worktrees Section */} + {terminalWorktrees.length > 0 && ( +
+

+ + Terminal Worktrees +

+ {terminalWorktrees.map((wt) => ( + + +
+
+ + + {wt.name} + + {wt.branchName && ( + + {wt.branchName} + + )} +
+ {wt.taskId && ( + + {wt.taskId} + )}
- - {worktree.specName} - -
- - - {/* Stats */} -
-
- - {worktree.filesChanged} files changed -
-
- - {worktree.commitCount} commits ahead -
-
- - {worktree.additions} -
-
- - {worktree.deletions} + + + {/* Branch info */} + {wt.baseBranch && wt.branchName && ( +
+ {wt.baseBranch} + + {wt.branchName} +
+ )} + + {/* Created at */} + {wt.createdAt && ( +
+ Created {new Date(wt.createdAt).toLocaleDateString()} +
+ )} + + {/* Actions */} +
+ +
-
- - {/* Branch info */} -
- {worktree.baseBranch} - - {worktree.branch} -
- - {/* Actions */} -
- - - -
- - - ); - })} + + + ))} +
+ )}
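loadWorktrees above fetches task worktrees and terminal worktrees in parallel and treats a terminal-worktree failure as non-fatal, only surfacing the task-worktree error to the user. A sketch of that shape with placeholder result types:

// Minimal sketch of the parallel load; result and worktree types are trimmed
// down placeholders, only the success/error branching matters here.
interface ApiResult<T> {
  success: boolean;
  data?: T;
  error?: string;
}

async function loadAllWorktrees<TaskWt, TermWt>(
  listTaskWorktrees: () => Promise<ApiResult<{ worktrees: TaskWt[] }>>,
  listTerminalWorktrees: () => Promise<ApiResult<TermWt[]>>
): Promise<{ taskWorktrees: TaskWt[]; terminalWorktrees: TermWt[]; error: string | null }> {
  // Fire both requests at once; neither depends on the other.
  const [taskResult, terminalResult] = await Promise.all([
    listTaskWorktrees(),
    listTerminalWorktrees(),
  ]);

  // A failed task-worktree fetch is surfaced to the user...
  const error = taskResult.success ? null : taskResult.error ?? 'Failed to load task worktrees';

  // ...while a failed terminal-worktree fetch only degrades that section.
  return {
    taskWorktrees: taskResult.success && taskResult.data ? taskResult.data.worktrees : [],
    terminalWorktrees: terminalResult.success && terminalResult.data ? terminalResult.data : [],
    error,
  };
}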
)} @@ -474,6 +606,46 @@ export function Worktrees({ projectId }: WorktreesProps) { + + {/* Terminal Worktree Delete Confirmation Dialog */} + !open && setTerminalWorktreeToDelete(null)}> + + + Delete Terminal Worktree? + + This will permanently delete the worktree and its branch. Any uncommitted changes will be lost. + {terminalWorktreeToDelete && ( + + {terminalWorktreeToDelete.name} + {terminalWorktreeToDelete.branchName && ( + ({terminalWorktreeToDelete.branchName}) + )} + + )} + + + + Cancel + + {isDeletingTerminal ? ( + <> + + Deleting... + + ) : ( + <> + + Delete + + )} + + + +
); } diff --git a/apps/frontend/src/renderer/components/context/MemoriesTab.tsx b/apps/frontend/src/renderer/components/context/MemoriesTab.tsx index 53fdb0fe21..49304ae870 100644 --- a/apps/frontend/src/renderer/components/context/MemoriesTab.tsx +++ b/apps/frontend/src/renderer/components/context/MemoriesTab.tsx @@ -1,11 +1,16 @@ -import { useState } from 'react'; +import { useState, useMemo } from 'react'; import { RefreshCw, Database, Brain, Search, CheckCircle, - XCircle + XCircle, + GitPullRequest, + Lightbulb, + FolderTree, + Code, + AlertTriangle } from 'lucide-react'; import { Button } from '../ui/button'; import { Card, CardContent, CardHeader, CardTitle } from '../ui/card'; @@ -15,8 +20,11 @@ import { ScrollArea } from '../ui/scroll-area'; import { cn } from '../../lib/utils'; import { MemoryCard } from './MemoryCard'; import { InfoItem } from './InfoItem'; +import { memoryFilterCategories } from './constants'; import type { GraphitiMemoryStatus, GraphitiMemoryState, MemoryEpisode } from '../../../shared/types'; +type FilterCategory = keyof typeof memoryFilterCategories; + interface MemoriesTabProps { memoryStatus: GraphitiMemoryStatus | null; memoryState: GraphitiMemoryState | null; @@ -27,6 +35,39 @@ interface MemoriesTabProps { onSearch: (query: string) => void; } +// Helper to check if memory is a PR review (by type or content) +function isPRReview(memory: MemoryEpisode): boolean { + if (['pr_review', 'pr_finding', 'pr_pattern', 'pr_gotcha'].includes(memory.type)) { + return true; + } + try { + const parsed = JSON.parse(memory.content); + return parsed.prNumber !== undefined && parsed.verdict !== undefined; + } catch { + return false; + } +} + +// Get the effective category for a memory +function getMemoryCategory(memory: MemoryEpisode): FilterCategory { + if (isPRReview(memory)) return 'pr'; + if (['session_insight', 'task_outcome'].includes(memory.type)) return 'sessions'; + if (['codebase_discovery', 'codebase_map'].includes(memory.type)) return 'codebase'; + if (['pattern', 'pr_pattern'].includes(memory.type)) return 'patterns'; + if (['gotcha', 'pr_gotcha'].includes(memory.type)) return 'gotchas'; + return 'sessions'; // default +} + +// Filter icons for each category +const filterIcons: Record = { + all: Brain, + pr: GitPullRequest, + sessions: Lightbulb, + codebase: FolderTree, + patterns: Code, + gotchas: AlertTriangle +}; + export function MemoriesTab({ memoryStatus, memoryState, @@ -37,6 +78,32 @@ export function MemoriesTab({ onSearch }: MemoriesTabProps) { const [localSearchQuery, setLocalSearchQuery] = useState(''); + const [activeFilter, setActiveFilter] = useState('all'); + + // Calculate memory counts by category + const memoryCounts = useMemo(() => { + const counts: Record = { + all: recentMemories.length, + pr: 0, + sessions: 0, + codebase: 0, + patterns: 0, + gotchas: 0 + }; + + for (const memory of recentMemories) { + const category = getMemoryCategory(memory); + counts[category]++; + } + + return counts; + }, [recentMemories]); + + // Filter memories based on active filter + const filteredMemories = useMemo(() => { + if (activeFilter === 'all') return recentMemories; + return recentMemories.filter(memory => getMemoryCategory(memory) === activeFilter); + }, [recentMemories, activeFilter]); const handleSearch = () => { if (localSearchQuery.trim()) { @@ -77,17 +144,41 @@ export function MemoriesTab({ {memoryStatus?.available ? ( <> -
+
- - {memoryState && ( - - )} +
- {memoryState?.last_session && ( -

- Last session: #{memoryState.last_session} -

+ + {/* Memory Stats Summary */} + {recentMemories.length > 0 && ( +
+
+
+
{memoryCounts.all}
+
Total
+
+
+
{memoryCounts.pr}
+
PR Reviews
+
+
+
{memoryCounts.sessions}
+
Sessions
+
+
+
{memoryCounts.codebase}
+
Codebase
+
+
+
{memoryCounts.patterns}
+
Patterns
+
+
+
{memoryCounts.gotchas}
+
Gotchas
+
+
+
)} ) : ( @@ -145,30 +236,92 @@ export function MemoriesTab({ )}
- {/* Recent Memories */} + {/* Memory Browser */}
-

- Recent Memories -

+
+

+ Memory Browser +

+ + {filteredMemories.length} of {recentMemories.length} memories + +
+ + {/* Filter Pills */} +
+ {(Object.keys(memoryFilterCategories) as FilterCategory[]).map((category) => { + const config = memoryFilterCategories[category]; + const count = memoryCounts[category]; + const Icon = filterIcons[category]; + const isActive = activeFilter === category; + + return ( + + ); + })} +
+ {/* Memory List */} {memoriesLoading && (
)} - {!memoriesLoading && recentMemories.length === 0 && ( + {!memoriesLoading && filteredMemories.length === 0 && recentMemories.length === 0 && ( +
+ +

+ No memories recorded yet. Memories are created during AI agent sessions and PR reviews. +

+
+ )} + + {!memoriesLoading && filteredMemories.length === 0 && recentMemories.length > 0 && (

- No memories recorded yet. Memories are created during AI agent sessions. + No memories match the selected filter.

+
)} - {recentMemories.length > 0 && ( + {filteredMemories.length > 0 && (
- {recentMemories.map((memory) => ( + {filteredMemories.map((memory) => ( ))}
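The memory browser above classifies each episode into a filter category, detecting PR reviews either by type or by the JSON shape of their content, and derives the filter-pill counts from that classification. A standalone sketch of those helpers, with the episode type reduced to the two fields they actually read:

// Standalone sketch of the classification used by the memory browser.
type FilterCategory = 'all' | 'pr' | 'sessions' | 'codebase' | 'patterns' | 'gotchas';

interface EpisodeLike {
  type: string;
  content: string; // may be a JSON document for PR reviews
}

function isPRReview(memory: EpisodeLike): boolean {
  if (['pr_review', 'pr_finding', 'pr_pattern', 'pr_gotcha'].includes(memory.type)) {
    return true;
  }
  // Some PR reviews arrive under a generic type; detect them by content shape.
  try {
    const parsed = JSON.parse(memory.content);
    return parsed.prNumber !== undefined && parsed.verdict !== undefined;
  } catch {
    return false;
  }
}

function getMemoryCategory(memory: EpisodeLike): Exclude<FilterCategory, 'all'> {
  if (isPRReview(memory)) return 'pr';
  if (['codebase_discovery', 'codebase_map'].includes(memory.type)) return 'codebase';
  if (['pattern', 'pr_pattern'].includes(memory.type)) return 'patterns';
  if (['gotcha', 'pr_gotcha'].includes(memory.type)) return 'gotchas';
  return 'sessions'; // session_insight, task_outcome, and anything unknown
}

function countByCategory(memories: EpisodeLike[]): Record<FilterCategory, number> {
  const counts: Record<FilterCategory, number> = {
    all: memories.length, pr: 0, sessions: 0, codebase: 0, patterns: 0, gotchas: 0,
  };
  for (const memory of memories) {
    counts[getMemoryCategory(memory)]++;
  }
  return counts;
}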
diff --git a/apps/frontend/src/renderer/components/context/MemoryCard.tsx b/apps/frontend/src/renderer/components/context/MemoryCard.tsx index ccb698e490..1da3ede0fc 100644 --- a/apps/frontend/src/renderer/components/context/MemoryCard.tsx +++ b/apps/frontend/src/renderer/components/context/MemoryCard.tsx @@ -14,8 +14,9 @@ import { Button } from '../ui/button'; import { Card, CardContent } from '../ui/card'; import { Badge } from '../ui/badge'; import type { MemoryEpisode } from '../../../shared/types'; -import { memoryTypeIcons } from './constants'; +import { memoryTypeIcons, memoryTypeColors, memoryTypeLabels } from './constants'; import { formatDate } from './utils'; +import { PRReviewCard } from './PRReviewCard'; interface MemoryCardProps { memory: MemoryEpisode; @@ -88,13 +89,28 @@ function ListItem({ children, variant = 'default' }: { children: React.ReactNode ); } +// Check if memory content looks like a PR review +function isPRReviewMemory(memory: MemoryEpisode): boolean { + // Check by type first + if (memory.type === 'pr_review' || memory.type === 'pr_finding' || + memory.type === 'pr_pattern' || memory.type === 'pr_gotcha') { + return true; + } + + // Check by content structure (for session_insight type that's actually a PR review) + try { + const parsed = JSON.parse(memory.content); + return parsed.prNumber !== undefined && parsed.verdict !== undefined; + } catch { + return false; + } +} + export function MemoryCard({ memory }: MemoryCardProps) { - const Icon = memoryTypeIcons[memory.type] || memoryTypeIcons.session_insight; const [expanded, setExpanded] = useState(false); - const parsed = useMemo(() => parseMemoryContent(memory.content), [memory.content]); - - // Determine if there's meaningful content to show + + // Determine if there's meaningful content to show (must be called before early return) const hasContent = useMemo(() => { if (!parsed) return false; const d = parsed.discoveries || {}; @@ -110,6 +126,15 @@ export function MemoryCard({ memory }: MemoryCardProps) { ); }, [parsed]); + // Delegate PR reviews to specialized component + if (isPRReviewMemory(memory)) { + return ; + } + + const Icon = memoryTypeIcons[memory.type] || memoryTypeIcons.session_insight; + const typeColor = memoryTypeColors[memory.type] || ''; + const typeLabel = memoryTypeLabels[memory.type] || memory.type.replace(/_/g, ' '); + const sessionLabel = memory.session_number ? `Session #${memory.session_number}` : parsed?.session_number @@ -129,8 +154,8 @@ export function MemoryCard({ memory }: MemoryCardProps) {
- - {memory.type.replace(/_/g, ' ')} + + {typeLabel} {sessionLabel && ( diff --git a/apps/frontend/src/renderer/components/context/PRReviewCard.tsx b/apps/frontend/src/renderer/components/context/PRReviewCard.tsx new file mode 100644 index 0000000000..c6a4a8b444 --- /dev/null +++ b/apps/frontend/src/renderer/components/context/PRReviewCard.tsx @@ -0,0 +1,319 @@ +import { useState, useMemo } from 'react'; +import { + Clock, + GitPullRequest, + CheckCircle, + XCircle, + MessageSquare, + ChevronDown, + ChevronUp, + AlertTriangle, + Bug, + Sparkles, + ExternalLink +} from 'lucide-react'; +import { Button } from '../ui/button'; +import { Card, CardContent } from '../ui/card'; +import { Badge } from '../ui/badge'; +import type { MemoryEpisode } from '../../../shared/types'; +import { formatDate } from './utils'; + +interface PRReviewCardProps { + memory: MemoryEpisode; +} + +interface ParsedPRReview { + prNumber: number; + repo: string; + verdict: 'approve' | 'request_changes' | 'comment'; + timestamp: string; + summary: { + verdict: string; + finding_counts: { + critical: number; + high: number; + medium: number; + low: number; + }; + total_findings: number; + }; + keyFindings: Array<{ + severity: string; + message: string; + file?: string; + line?: number; + }>; + patterns: string[]; + gotchas: string[]; + isFollowup: boolean; + previousReviews?: number; +} + +function parsePRReviewContent(content: string): ParsedPRReview | null { + try { + return JSON.parse(content); + } catch { + return null; + } +} + +function VerdictBadge({ verdict }: { verdict: string }) { + switch (verdict) { + case 'approve': + return ( + + + Approved + + ); + case 'request_changes': + return ( + + + Changes Requested + + ); + case 'comment': + return ( + + + Commented + + ); + default: + return ( + + {verdict} + + ); + } +} + +function SeverityBadge({ severity, count }: { severity: string; count: number }) { + if (count === 0) return null; + + const colorMap: Record = { + critical: 'bg-red-600/20 text-red-400 border-red-600/30', + high: 'bg-orange-500/20 text-orange-400 border-orange-500/30', + medium: 'bg-amber-500/20 text-amber-400 border-amber-500/30', + low: 'bg-blue-500/20 text-blue-400 border-blue-500/30' + }; + + return ( + + {count} {severity} + + ); +} + +export function PRReviewCard({ memory }: PRReviewCardProps) { + const [expanded, setExpanded] = useState(false); + const parsed = useMemo(() => parsePRReviewContent(memory.content), [memory.content]); + + if (!parsed) { + // Fallback for non-parseable content + return ( + + +
+ + PR Review + {formatDate(memory.timestamp)} +
+
+            {memory.content}
+          
+
+
+ ); + } + + const { finding_counts } = parsed.summary || { finding_counts: { critical: 0, high: 0, medium: 0, low: 0 } }; + const totalFindings = (finding_counts?.critical || 0) + (finding_counts?.high || 0) + + (finding_counts?.medium || 0) + (finding_counts?.low || 0); + const hasGotchas = parsed.gotchas && parsed.gotchas.length > 0; + const hasPatterns = parsed.patterns && parsed.patterns.length > 0; + const hasFindings = parsed.keyFindings && parsed.keyFindings.length > 0; + const hasExpandableContent = hasGotchas || hasPatterns || hasFindings; + + return ( + + + {/* Header */} +
+
+
+ +
+
+ {/* PR Info Row */} +
+ + PR #{parsed.prNumber} + + + {parsed.repo} + + {parsed.isFollowup && ( + + Follow-up + + )} +
+ + {/* Verdict & Stats Row */} +
+ + {totalFindings > 0 && ( + + {totalFindings} finding{totalFindings !== 1 ? 's' : ''} + + )} +
+ + {/* Severity Breakdown */} + {totalFindings > 0 && ( +
+ + + + +
+ )} + + {/* Timestamp */} +
+ + {formatDate(memory.timestamp)} +
+
+
+ + {/* Expand Button */} + {hasExpandableContent && ( + + )} +
+ + {/* Expanded Content */} + {expanded && ( +
+ {/* Key Findings */} + {hasFindings && ( +
+
+ + Key Findings + + {parsed.keyFindings.length} + +
+
+ {parsed.keyFindings.slice(0, 5).map((finding, idx) => ( +
+
+ + {finding.severity} + + {finding.file && ( + + {finding.file}{finding.line ? `:${finding.line}` : ''} + + )} +
+

{finding.message}

+
+ ))} + {parsed.keyFindings.length > 5 && ( +

+ +{parsed.keyFindings.length - 5} more findings +

+ )} +
+
+ )} + + {/* Gotchas */} + {hasGotchas && ( +
+
+ + Gotchas Discovered + + {parsed.gotchas.length} + +
+
    + {parsed.gotchas.map((gotcha, idx) => ( +
+ {gotcha}
+ ))}
+
+ )} + + {/* Patterns */} + {hasPatterns && ( +
+
+ + Patterns Identified + + {parsed.patterns.length} + +
+
+ {parsed.patterns.map((pattern, idx) => ( + + {pattern} + + ))} +
+
+ )} + + {/* Link to PR */} + {parsed.repo && parsed.prNumber && ( +
+ +
+ )} +
+ )} +
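PRReviewCard above parses the episode content as JSON and falls back to rendering it raw when parsing fails, and it sums finding counts defensively because every field may be missing. A sketch of those two helpers; the example values at the end are made up for illustration:

// Defensive parsing sketch; field names follow the ParsedPRReview interface in
// the diff, the helper names are illustrative.
interface FindingCounts {
  critical: number;
  high: number;
  medium: number;
  low: number;
}

interface ParsedPRReviewLike {
  prNumber: number;
  repo: string;
  verdict: 'approve' | 'request_changes' | 'comment';
  summary?: { finding_counts?: Partial<FindingCounts> };
}

function parsePRReviewContent(content: string): ParsedPRReviewLike | null {
  // Content written by older sessions may not be JSON at all; render it raw then.
  try {
    return JSON.parse(content) as ParsedPRReviewLike;
  } catch {
    return null;
  }
}

function totalFindings(review: ParsedPRReviewLike): number {
  // Every field is optional in practice, so default each count to zero.
  const counts = review.summary?.finding_counts ?? {};
  return (counts.critical ?? 0) + (counts.high ?? 0) + (counts.medium ?? 0) + (counts.low ?? 0);
}

// Example (hypothetical data): malformed payloads fall back to null, valid ones sum their counts.
console.log(parsePRReviewContent('not json')); // null
console.log(totalFindings({
  prNumber: 42, repo: 'org/repo', verdict: 'approve',
  summary: { finding_counts: { critical: 0, high: 1, medium: 2, low: 3 } },
})); // 6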
+
+ ); +} diff --git a/apps/frontend/src/renderer/components/context/constants.ts b/apps/frontend/src/renderer/components/context/constants.ts index 963afc361e..3905d06965 100644 --- a/apps/frontend/src/renderer/components/context/constants.ts +++ b/apps/frontend/src/renderer/components/context/constants.ts @@ -10,7 +10,11 @@ import { FolderTree, AlertTriangle, Smartphone, - Monitor + Monitor, + GitPullRequest, + Bug, + Sparkles, + Target } from 'lucide-react'; // Service type icon mapping @@ -45,5 +49,54 @@ export const memoryTypeIcons: Record = { codebase_discovery: FolderTree, codebase_map: FolderTree, pattern: Code, - gotcha: AlertTriangle + gotcha: AlertTriangle, + task_outcome: Target, + qa_result: Target, + historical_context: Lightbulb, + pr_review: GitPullRequest, + pr_finding: Bug, + pr_pattern: Sparkles, + pr_gotcha: AlertTriangle +}; + +// Memory type colors for badges and styling +export const memoryTypeColors: Record = { + session_insight: 'bg-amber-500/10 text-amber-400 border-amber-500/30', + codebase_discovery: 'bg-blue-500/10 text-blue-400 border-blue-500/30', + codebase_map: 'bg-blue-500/10 text-blue-400 border-blue-500/30', + pattern: 'bg-purple-500/10 text-purple-400 border-purple-500/30', + gotcha: 'bg-red-500/10 text-red-400 border-red-500/30', + task_outcome: 'bg-green-500/10 text-green-400 border-green-500/30', + qa_result: 'bg-teal-500/10 text-teal-400 border-teal-500/30', + historical_context: 'bg-slate-500/10 text-slate-400 border-slate-500/30', + pr_review: 'bg-cyan-500/10 text-cyan-400 border-cyan-500/30', + pr_finding: 'bg-orange-500/10 text-orange-400 border-orange-500/30', + pr_pattern: 'bg-purple-500/10 text-purple-400 border-purple-500/30', + pr_gotcha: 'bg-red-500/10 text-red-400 border-red-500/30' +}; + +// Memory type labels for display +export const memoryTypeLabels: Record = { + session_insight: 'Session Insight', + codebase_discovery: 'Codebase Discovery', + codebase_map: 'Codebase Map', + pattern: 'Pattern', + gotcha: 'Gotcha', + task_outcome: 'Task Outcome', + qa_result: 'QA Result', + historical_context: 'Historical Context', + pr_review: 'PR Review', + pr_finding: 'PR Finding', + pr_pattern: 'PR Pattern', + pr_gotcha: 'PR Gotcha' +}; + +// Filter categories for grouping memory types +export const memoryFilterCategories = { + all: { label: 'All', types: [] as string[] }, + pr: { label: 'PR Reviews', types: ['pr_review', 'pr_finding', 'pr_pattern', 'pr_gotcha'] }, + sessions: { label: 'Sessions', types: ['session_insight', 'task_outcome', 'qa_result', 'historical_context'] }, + codebase: { label: 'Codebase', types: ['codebase_discovery', 'codebase_map'] }, + patterns: { label: 'Patterns', types: ['pattern', 'pr_pattern'] }, + gotchas: { label: 'Gotchas', types: ['gotcha', 'pr_gotcha'] } }; diff --git a/apps/frontend/src/renderer/components/github-issues/components/InvestigationDialog.tsx b/apps/frontend/src/renderer/components/github-issues/components/InvestigationDialog.tsx index 6408829741..05b5456a21 100644 --- a/apps/frontend/src/renderer/components/github-issues/components/InvestigationDialog.tsx +++ b/apps/frontend/src/renderer/components/github-issues/components/InvestigationDialog.tsx @@ -144,7 +144,10 @@ export function InvestigationDialog({ {selectedCommentIds.length === comments.length ? 'Deselect All' : 'Select All'}
- +
{comments.map((comment) => (
@@ -60,8 +63,9 @@ export function IssueListHeader({ size="icon" onClick={onRefresh} disabled={isLoading} + aria-label={t('buttons.refresh')} > - +
diff --git a/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx b/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx index 553d89d507..c4460f8b8b 100644 --- a/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx +++ b/apps/frontend/src/renderer/components/github-prs/GitHubPRs.tsx @@ -60,11 +60,14 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) const { prs, isLoading, + isLoadingMore, + isLoadingPRDetails, error, selectedPRNumber, reviewResult, reviewProgress, isReviewing, + hasMore, selectPR, runReview, runFollowupReview, @@ -75,10 +78,11 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) mergePR, assignPR, refresh, + loadMore, isConnected, repoFullName, getReviewStateForPR, - } = useGitHubPRs(selectedProject?.id); + } = useGitHubPRs(selectedProject?.id, { isActive }); const selectedPR = prs.find(pr => pr.number === selectedPRNumber); @@ -126,9 +130,9 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) } }, [selectedPRNumber, cancelReview]); - const handlePostReview = useCallback(async (selectedFindingIds?: string[]): Promise => { + const handlePostReview = useCallback(async (selectedFindingIds?: string[], options?: { forceApprove?: boolean }): Promise => { if (selectedPRNumber && reviewResult) { - return await postReview(selectedPRNumber, selectedFindingIds); + return await postReview(selectedPRNumber, selectedFindingIds, options); } return false; }, [selectedPRNumber, reviewResult, postReview]); @@ -218,9 +222,12 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) prs={filteredPRs} selectedPRNumber={selectedPRNumber} isLoading={isLoading} + isLoadingMore={isLoadingMore} + hasMore={hasMore} error={error} getReviewStateForPR={getReviewStateForPR} onSelectPR={selectPR} + onLoadMore={loadMore} />
} @@ -228,12 +235,14 @@ export function GitHubPRs({ onOpenSettings, isActive = false }: GitHubPRsProps) selectedPR ? ( void; onRunFollowupReview: () => void; onCheckNewCommits: () => Promise; onCancelReview: () => void; - onPostReview: (selectedFindingIds?: string[]) => Promise; + onPostReview: (selectedFindingIds?: string[], options?: { forceApprove?: boolean }) => Promise; onPostComment: (body: string) => void; onMergePR: (mergeMethod?: 'merge' | 'squash' | 'rebase') => void; onAssignPR: (username: string) => void; @@ -63,12 +67,14 @@ function getStatusColor(status: PRReviewResult['overallStatus']): string { export function PRDetail({ pr, + projectId, reviewResult, previousReviewResult, reviewProgress, isReviewing, initialNewCommitsCheck, - isActive = false, + isActive: _isActive = false, + isLoadingFiles = false, onRunReview, onRunFollowupReview, onCheckNewCommits, @@ -79,7 +85,7 @@ export function PRDetail({ onAssignPR: _onAssignPR, onGetLogs, }: PRDetailProps) { - const { t, i18n } = useTranslation('common'); + const { t } = useTranslation('common'); // Selection state for findings const [selectedFindingIds, setSelectedFindingIds] = useState>(new Set()); const [postedFindingIds, setPostedFindingIds] = useState>(new Set()); @@ -98,6 +104,15 @@ export function PRDetail({ const [prLogs, setPrLogs] = useState(null); const [isLoadingLogs, setIsLoadingLogs] = useState(false); const logsLoadedRef = useRef(false); + + // Merge readiness state (real-time validation of AI verdict freshness) + const [mergeReadiness, setMergeReadiness] = useState(null); + const mergeReadinessAbortRef = useRef(null); + + // Workflows awaiting approval state (for fork PRs) + const [workflowsAwaiting, setWorkflowsAwaiting] = useState(null); + const [isApprovingWorkflow, setIsApprovingWorkflow] = useState(null); + const [workflowsExpanded, setWorkflowsExpanded] = useState(true); // Sync with store's newCommitsCheck when it changes (e.g., when switching PRs or after refresh) // Always sync to keep local state in sync with store, including null values @@ -242,6 +257,95 @@ export function PRDetail({ setLogsExpanded(false); }, [pr.number]); + // Check for workflows awaiting approval (fork PRs) when PR changes or review completes + useEffect(() => { + const checkWorkflows = async () => { + try { + const result = await window.electronAPI.github.getWorkflowsAwaitingApproval( + '', // projectId will be resolved from active project + pr.number + ); + setWorkflowsAwaiting(result); + } catch { + setWorkflowsAwaiting(null); + } + }; + + checkWorkflows(); + // Re-check when a review is completed (CI status might have changed) + }, [pr.number, reviewResult]); + + // Check merge readiness (real-time validation) when PR is selected + // This runs on every PR selection to catch stale verdicts + useEffect(() => { + // Cancel any pending check + if (mergeReadinessAbortRef.current) { + mergeReadinessAbortRef.current.abort(); + } + mergeReadinessAbortRef.current = new AbortController(); + + const checkMergeReadiness = async () => { + if (!projectId) { + setMergeReadiness(null); + return; + } + + try { + const result = await window.electronAPI.github.checkMergeReadiness(projectId, pr.number); + // Only update if not aborted + if (!mergeReadinessAbortRef.current?.signal.aborted) { + setMergeReadiness(result); + } + } catch { + if (!mergeReadinessAbortRef.current?.signal.aborted) { + setMergeReadiness(null); + } + } + }; + + checkMergeReadiness(); + + return () => { + if (mergeReadinessAbortRef.current) { + 
mergeReadinessAbortRef.current.abort(); + } + }; + }, [pr.number, projectId]); + + // Handler to approve a workflow + const handleApproveWorkflow = useCallback(async (runId: number) => { + setIsApprovingWorkflow(runId); + try { + const success = await window.electronAPI.github.approveWorkflow('', runId); + if (success) { + // Refresh the workflows list after approval + const result = await window.electronAPI.github.getWorkflowsAwaitingApproval('', pr.number); + setWorkflowsAwaiting(result); + } + } finally { + setIsApprovingWorkflow(null); + } + }, [pr.number]); + + // Handler to approve all workflows at once + const handleApproveAllWorkflows = useCallback(async () => { + if (!workflowsAwaiting?.workflow_runs.length) return; + + for (const workflow of workflowsAwaiting.workflow_runs) { + setIsApprovingWorkflow(workflow.id); + try { + await window.electronAPI.github.approveWorkflow('', workflow.id); + } catch { + // Continue with other workflows even if one fails + } + } + setIsApprovingWorkflow(null); + + // Refresh the workflows list + const result = await window.electronAPI.github.getWorkflowsAwaitingApproval('', pr.number); + setWorkflowsAwaiting(result); + }, [pr.number, workflowsAwaiting]); + // Count selected findings by type for the button label const selectedCount = selectedFindingIds.size; @@ -458,7 +562,7 @@ export function PRDetail({ } }; - // Auto-approval for clean PRs - posts LOW findings as suggestions + approval comment + // Auto-approval for clean PRs - posts approval with LOW findings as suggestions in a SINGLE comment // NOTE: GitHub PR comments are intentionally in English as it's the lingua franca // for code reviews and GitHub's international developer community. The comment // content is meant to be read by contributors who may have different locales. @@ -466,40 +570,15 @@ export function PRDetail({ if (!reviewResult) return; setIsPosting(true); try { - // Step 1: Post any LOW findings as non-blocking suggestions + // Post approval with suggestions in a single review comment + // This uses forceApprove to set APPROVE status even with LOW findings const lowFindingIds = lowSeverityFindings.map(f => f.id); - if (lowFindingIds.length > 0) { - const success = await onPostReview(lowFindingIds); - if (!success) { - // Failed to post findings, don't proceed with approval - return; - } - // Mark them as posted locally + + const success = await onPostReview(lowFindingIds, { forceApprove: true }); + if (success && lowFindingIds.length > 0) { + // Mark findings as posted locally setPostedFindingIds(prev => new Set([...prev, ...lowFindingIds])); } - - // Step 2: Post the approval comment - const findingsNote = lowFindingIds.length > 0 - ? `- ${lowFindingIds.length} low-severity suggestion${lowFindingIds.length !== 1 ? 's' : ''} posted above` - : '- No issues found'; - - const approvalMessage = `## Auto Claude Review - APPROVED - -**Status:** Ready to Merge - -**Summary:** ${reviewResult.summary} - ---- -**Review Details:** -${findingsNote} -- Reviewed at: ${formatDate(reviewResult.reviewedAt, i18n.language)} -${reviewResult.isFollowupReview ? `- Follow-up review: All previous blocking issues resolved` : ''} - -*This automated review found no blocking issues. The PR can be safely merged.* - ---- -*Generated by Auto Claude*`; - await onPostComment(approvalMessage); } finally { setIsPosting(false); } @@ -519,7 +598,36 @@ ${reviewResult.isFollowupReview ? `- Follow-up review: All previous blocking iss
{/* Refactored Header */} - + + + {/* Merge Readiness Warning Banner - shows when real-time status contradicts AI verdict */} + {mergeReadiness && mergeReadiness.blockers.length > 0 && reviewResult?.success && ( + prStatus.status === 'ready_to_merge' || prStatus.status === 'reviewed_pending_post' + ) && ( + + +
+ +
+

+ {t('prReview.verdictOutdated', 'AI verdict may be outdated')} +

+
    + {mergeReadiness.blockers.map((blocker, idx) => ( +
+ {blocker}
+ ))}
+

+ {t('prReview.rerunReviewSuggestion', 'Consider re-running the review after resolving these issues.')} +

+
+
+
+
+ )} {/* Review Status & Actions */} )} - {/* Auto-approve for clean PRs (only LOW findings or no findings) */} - {isCleanReview && ( + {/* Approve button - consolidated logic to avoid duplicate buttons */} + {/* Don't show when overallStatus is 'request_changes' (e.g., workflows blocked, or other issues) */} + {isCleanReview && !hasPostedFindings && reviewResult?.overallStatus !== 'request_changes' && ( - - + {/* Manual approve button - only show for non-clean reviews that are ready to merge */} + {/* isReadyToMerge already checks for 'approve' status, so no need for additional check */} + {isReadyToMerge && !isCleanReview && !hasPostedFindings && ( + + )} + + {/* Merge button - only show after approval has been posted */} + {hasPostedFindings && ( + )} {postSuccess && ( @@ -646,7 +767,7 @@ ${reviewResult.isFollowupReview ? `- Follow-up review: All previous blocking iss
{/* Follow-up Review Resolution Status */} {reviewResult.isFollowupReview && ( -
+
{(reviewResult.resolvedFindings?.length ?? 0) > 0 && ( @@ -665,6 +786,21 @@ ${reviewResult.isFollowupReview ? `- Follow-up review: All previous blocking iss {t('prReview.newIssue', { count: reviewResult.newFindingsSinceLastReview?.length ?? 0 })} )} + {/* Re-run follow-up review button */} +
)} @@ -698,6 +834,94 @@ ${reviewResult.isFollowupReview ? `- Follow-up review: All previous blocking iss )} + {/* Workflows Awaiting Approval - for fork PRs */} + {workflowsAwaiting && workflowsAwaiting.awaiting_approval > 0 && ( + } + badge={ + + + {t('prReview.blockedByWorkflows')} + + } + open={workflowsExpanded} + onOpenChange={setWorkflowsExpanded} + > +
+

+ {t('prReview.workflowsAwaitingDescription')} +

+ +
+ {workflowsAwaiting.workflow_runs.map((workflow) => ( +
+
+ +
+ + {workflow.workflow_name} + + + {workflow.name} + +
+
+
+ + +
+
+ ))} +
+ + {workflowsAwaiting.workflow_runs.length > 1 && ( +
+ +
+ )} +
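The approve-all handler above walks the awaiting workflow runs in order, tolerates individual failures, and refreshes the list once at the end. A sketch of that loop with the electronAPI calls replaced by placeholder callbacks:

// Sketch of the "approve all" loop; approveWorkflow and refreshAwaiting stand in
// for the corresponding electronAPI calls.
interface WorkflowRunLike {
  id: number;
  workflow_name: string;
}

async function approveAllWorkflows(
  runs: WorkflowRunLike[],
  approveWorkflow: (runId: number) => Promise<boolean>,
  refreshAwaiting: () => Promise<void>,
  onApproving: (runId: number | null) => void
): Promise<void> {
  for (const run of runs) {
    onApproving(run.id); // drive the per-row spinner
    try {
      await approveWorkflow(run.id);
    } catch {
      // A single failed approval should not stop the remaining workflows.
    }
  }
  onApproving(null);
  // Re-fetch so the "awaiting approval" section reflects the new state.
  await refreshAwaiting();
}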
+
+ )} + {/* Review Logs - show during review or after completion */} {(reviewResult || isReviewing) && ( 0; return (
@@ -69,11 +93,27 @@ export function PRHeader({ pr }: PRHeaderProps) {
-
- + {/* Clickable files indicator */} +
+ {hasFiles && ( + showFiles ? : + )} +
+{pr.additions} @@ -84,6 +124,52 @@ export function PRHeader({ pr }: PRHeaderProps) {
+ + {/* Collapsible file list */} + {showFiles && ( +
+ {isLoadingFiles ? ( +
+ + {t('prReview.loadingFiles')} +
+ ) : hasFiles ? ( +
+ {pr.files.map((file, index) => ( +
+ + + {file.path} + + + {file.status} + +
+ + + {file.additions} + + + + {file.deletions} + +
+
+ ))} +
+ ) : ( +
+ {t('prReview.noFilesAvailable')} +
+ )} +
+ )}
); } diff --git a/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx b/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx index c4f67ba4ee..8ee076751d 100644 --- a/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx +++ b/apps/frontend/src/renderer/components/github-prs/components/PRList.tsx @@ -1,4 +1,5 @@ -import { GitPullRequest, User, Clock, FileDiff } from 'lucide-react'; +import { useRef, useEffect, useCallback } from 'react'; +import { GitPullRequest, User, Clock, FileDiff, Loader2 } from 'lucide-react'; import { ScrollArea } from '../../ui/scroll-area'; import { Badge } from '../../ui/badge'; import { cn } from '../../../lib/utils'; @@ -166,9 +167,12 @@ interface PRListProps { prs: PRData[]; selectedPRNumber: number | null; isLoading: boolean; + isLoadingMore: boolean; + hasMore: boolean; error: string | null; getReviewStateForPR: (prNumber: number) => PRReviewInfo | null; onSelectPR: (prNumber: number) => void; + onLoadMore: () => void; } function formatDate(dateString: string): string { @@ -191,8 +195,45 @@ function formatDate(dateString: string): string { return date.toLocaleDateString(); } -export function PRList({ prs, selectedPRNumber, isLoading, error, getReviewStateForPR, onSelectPR }: PRListProps) { +export function PRList({ + prs, + selectedPRNumber, + isLoading, + isLoadingMore, + hasMore, + error, + getReviewStateForPR, + onSelectPR, + onLoadMore +}: PRListProps) { const { t } = useTranslation('common'); + const scrollAreaRef = useRef(null); + const loadMoreTriggerRef = useRef(null); + + // Intersection Observer for infinite scroll + const handleIntersection = useCallback((entries: IntersectionObserverEntry[]) => { + const [entry] = entries; + if (entry.isIntersecting && hasMore && !isLoadingMore && !isLoading) { + onLoadMore(); + } + }, [hasMore, isLoadingMore, isLoading, onLoadMore]); + + useEffect(() => { + const trigger = loadMoreTriggerRef.current; + if (!trigger) return; + + const observer = new IntersectionObserver(handleIntersection, { + root: null, // Use viewport as root + rootMargin: '100px', // Start loading 100px before reaching the bottom + threshold: 0 + }); + + observer.observe(trigger); + + return () => { + observer.disconnect(); + }; + }, [handleIntersection]); if (isLoading && prs.length === 0) { return ( @@ -227,7 +268,7 @@ export function PRList({ prs, selectedPRNumber, isLoading, error, getReviewState } return ( - +
{prs.map((pr) => { const reviewState = getReviewStateForPR(pr.number); @@ -296,6 +337,24 @@ export function PRList({ prs, selectedPRNumber, isLoading, error, getReviewState ); })} + + {/* Load more trigger / Loading indicator */} +
+ {isLoadingMore ? ( +
+ + {t('prReview.loadingMore')} +
+ ) : hasMore ? ( + + {t('prReview.scrollForMore')} + + ) : prs.length > 0 ? ( + + {t('prReview.allPRsLoaded')} + + ) : null} +
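The PRList wiring above drives infinite scroll with an IntersectionObserver: a sentinel element at the end of the list triggers `onLoadMore` when it comes within 100px of the viewport, guarded so it never fires while a load is already in flight or when there is nothing left to fetch. Below is a compact, reusable version of the same idea; this is a sketch, not the hook used in the diff.

```tsx
// Sketch of an infinite-scroll sentinel hook using IntersectionObserver,
// mirroring the guards used above (hasMore / not already loading).
import { useCallback, useEffect, useRef } from 'react';

export function useLoadMoreSentinel(options: {
  hasMore: boolean;
  isLoading: boolean;
  onLoadMore: () => void;
  rootMargin?: string; // start loading before the sentinel is fully visible
}) {
  const { hasMore, isLoading, onLoadMore, rootMargin = '100px' } = options;
  const sentinelRef = useRef<HTMLDivElement | null>(null);

  const handleIntersection = useCallback(
    (entries: IntersectionObserverEntry[]) => {
      const [entry] = entries;
      if (entry.isIntersecting && hasMore && !isLoading) {
        onLoadMore();
      }
    },
    [hasMore, isLoading, onLoadMore]
  );

  useEffect(() => {
    const node = sentinelRef.current;
    if (!node) return;

    const observer = new IntersectionObserver(handleIntersection, {
      root: null,   // observe against the viewport
      rootMargin,   // e.g. '100px' to prefetch before reaching the bottom
      threshold: 0,
    });
    observer.observe(node);
    return () => observer.disconnect();
  }, [handleIntersection, rootMargin]);

  return sentinelRef; // attach to a <div ref={sentinelRef} /> rendered after the list
}
```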
); diff --git a/apps/frontend/src/renderer/components/github-prs/components/ReviewStatusTree.tsx b/apps/frontend/src/renderer/components/github-prs/components/ReviewStatusTree.tsx index f9d81ac812..4dc8485ff8 100644 --- a/apps/frontend/src/renderer/components/github-prs/components/ReviewStatusTree.tsx +++ b/apps/frontend/src/renderer/components/github-prs/components/ReviewStatusTree.tsx @@ -1,5 +1,5 @@ import { useState } from 'react'; -import { CheckCircle, Circle, CircleDot, Play } from 'lucide-react'; +import { CheckCircle, Circle, CircleDot, Play, RefreshCw } from 'lucide-react'; import { useTranslation } from 'react-i18next'; import { Button } from '../../ui/button'; import { cn } from '../../../lib/utils'; @@ -143,7 +143,18 @@ export function ReviewStatusTree({ id: 'analysis', label: t('prReview.analysisComplete', { count: reviewResult.findings.length }), status: 'completed', - date: reviewResult.reviewedAt + date: reviewResult.reviewedAt, + action: ( + + ) }); } diff --git a/apps/frontend/src/renderer/components/github-prs/hooks/useGitHubPRs.ts b/apps/frontend/src/renderer/components/github-prs/hooks/useGitHubPRs.ts index 62e4cc4eef..8116f73e71 100644 --- a/apps/frontend/src/renderer/components/github-prs/hooks/useGitHubPRs.ts +++ b/apps/frontend/src/renderer/components/github-prs/hooks/useGitHubPRs.ts @@ -1,4 +1,4 @@ -import { useState, useEffect, useCallback, useMemo } from 'react'; +import { useState, useEffect, useCallback, useMemo, useRef } from 'react'; import type { PRData, PRReviewResult, @@ -11,9 +11,16 @@ import { usePRReviewStore, startPRReview as storeStartPRReview, startFollowupRev export type { PRData, PRReviewResult, PRReviewProgress }; export type { PRReviewFinding } from '../../../../preload/api/modules/github-api'; +interface UseGitHubPRsOptions { + /** Whether the component is currently active/visible */ + isActive?: boolean; +} + interface UseGitHubPRsResult { prs: PRData[]; isLoading: boolean; + isLoadingMore: boolean; + isLoadingPRDetails: boolean; // Loading full PR details including files error: string | null; selectedPR: PRData | null; selectedPRNumber: number | null; @@ -23,26 +30,39 @@ interface UseGitHubPRsResult { isConnected: boolean; repoFullName: string | null; activePRReviews: number[]; // PR numbers currently being reviewed + hasMore: boolean; // Whether there are more PRs to load selectPR: (prNumber: number | null) => void; refresh: () => Promise; + loadMore: () => Promise; runReview: (prNumber: number) => Promise; runFollowupReview: (prNumber: number) => Promise; checkNewCommits: (prNumber: number) => Promise; cancelReview: (prNumber: number) => Promise; - postReview: (prNumber: number, selectedFindingIds?: string[]) => Promise; + postReview: (prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }) => Promise; postComment: (prNumber: number, body: string) => Promise; mergePR: (prNumber: number, mergeMethod?: 'merge' | 'squash' | 'rebase') => Promise; assignPR: (prNumber: number, username: string) => Promise; getReviewStateForPR: (prNumber: number) => { isReviewing: boolean; progress: PRReviewProgress | null; result: PRReviewResult | null; previousResult: PRReviewResult | null; error: string | null; newCommitsCheck?: NewCommitsCheck | null } | null; } -export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { +export function useGitHubPRs(projectId?: string, options: UseGitHubPRsOptions = {}): UseGitHubPRsResult { + const { isActive = true } = options; const [prs, setPrs] = useState([]); const [isLoading, 
setIsLoading] = useState(false); + const [isLoadingMore, setIsLoadingMore] = useState(false); + const [isLoadingPRDetails, setIsLoadingPRDetails] = useState(false); const [error, setError] = useState(null); const [selectedPRNumber, setSelectedPRNumber] = useState(null); + const [selectedPRDetails, setSelectedPRDetails] = useState(null); const [isConnected, setIsConnected] = useState(false); const [repoFullName, setRepoFullName] = useState(null); + const [currentPage, setCurrentPage] = useState(1); + const [hasMore, setHasMore] = useState(true); + + // Track previous isActive state to detect tab navigation + const wasActiveRef = useRef(isActive); + // Track if initial load has happened + const hasLoadedRef = useRef(false); // Get PR review state from the global store const prReviews = usePRReviewStore((state) => state.prReviews); @@ -82,13 +102,18 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { }; }, [projectId, prReviews, getPRReviewState]); - const selectedPR = prs.find(pr => pr.number === selectedPRNumber) || null; + // Use detailed PR data if available (includes files), otherwise fall back to list data + const selectedPR = selectedPRDetails || prs.find(pr => pr.number === selectedPRNumber) || null; // Check connection and fetch PRs - const fetchPRs = useCallback(async () => { + const fetchPRs = useCallback(async (page: number = 1, append: boolean = false) => { if (!projectId) return; - setIsLoading(true); + if (append) { + setIsLoadingMore(true); + } else { + setIsLoading(true); + } setError(null); try { @@ -99,55 +124,44 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { setRepoFullName(connectionResult.data.repoFullName || null); if (connectionResult.data.connected) { - // Fetch PRs - const result = await window.electronAPI.github.listPRs(projectId); + // Fetch PRs with pagination + const result = await window.electronAPI.github.listPRs(projectId, page); if (result) { - setPrs(result); + // Check if there are more PRs to load (GitHub returns up to 100 per page) + setHasMore(result.length === 100); + setCurrentPage(page); + + if (append) { + // Append to existing PRs, deduplicating by PR number + setPrs(prevPrs => { + const existingNumbers = new Set(prevPrs.map(pr => pr.number)); + const newPrs = result.filter(pr => !existingNumbers.has(pr.number)); + return [...prevPrs, ...newPrs]; + }); + } else { + setPrs(result); + } - // Preload review results for all PRs - const preloadPromises = result.map(async (pr) => { + // Batch preload review results for PRs not in store (single IPC call) + const prsNeedingPreload = result.filter(pr => { const existingState = getPRReviewState(projectId, pr.number); - // Only fetch from disk if we don't have a result in the store - if (!existingState?.result) { - const reviewResult = await window.electronAPI.github.getPRReview(projectId, pr.number); + return !existingState?.result; + }); + + if (prsNeedingPreload.length > 0) { + const prNumbers = prsNeedingPreload.map(pr => pr.number); + const batchReviews = await window.electronAPI.github.getPRReviewsBatch(projectId, prNumbers); + + // Update store with loaded results + for (const reviewResult of Object.values(batchReviews)) { if (reviewResult) { - // Update store with the loaded result - // Preserve newCommitsCheck during preload to avoid race condition with new commits check usePRReviewStore.getState().setPRReviewResult(projectId, reviewResult, { preserveNewCommitsCheck: true }); - return { prNumber: pr.number, reviewResult }; } - } else { - return { 
prNumber: pr.number, reviewResult: existingState.result }; } - return null; - }); - - // Wait for all preloads to complete, then check for new commits - const preloadResults = await Promise.all(preloadPromises); - - // Check for new commits on PRs that have been reviewed - // (either has reviewedCommitSha or the snake_case variant from older reviews) - const prsWithReviews = preloadResults.filter( - (r): r is { prNumber: number; reviewResult: PRReviewResult } => - r !== null && - (!!r.reviewResult?.reviewedCommitSha || !!(r.reviewResult as any)?.reviewed_commit_sha) - ); - - if (prsWithReviews.length > 0) { - // Check new commits in parallel for all reviewed PRs - await Promise.all( - prsWithReviews.map(async ({ prNumber }) => { - try { - const newCommitsResult = await window.electronAPI.github.checkNewCommits(projectId, prNumber); - // Use the action from the hook subscription to ensure proper React re-renders - setNewCommitsCheckAction(projectId, prNumber, newCommitsResult); - } catch (err) { - // Silently fail for individual PR checks - don't block the list - console.warn(`Failed to check new commits for PR #${prNumber}:`, err); - } - }) - ); } + + // Note: New commits check is now lazy - only done when user selects a PR + // or explicitly triggers a check. This significantly speeds up list loading. } } } else { @@ -160,12 +174,39 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { setIsConnected(false); } finally { setIsLoading(false); + setIsLoadingMore(false); } }, [projectId, getPRReviewState, setNewCommitsCheckAction]); + // Initial load useEffect(() => { - fetchPRs(); - }, [fetchPRs]); + if (projectId && !hasLoadedRef.current) { + hasLoadedRef.current = true; + fetchPRs(1, false); + } + }, [projectId, fetchPRs]); + + // Auto-refresh when tab becomes active (navigating to GitHub PRs tab) + useEffect(() => { + // Only refresh if transitioning from inactive to active AND we've loaded before + if (isActive && !wasActiveRef.current && hasLoadedRef.current) { + // Reset to first page and refresh + setCurrentPage(1); + setHasMore(true); + fetchPRs(1, false); + } + wasActiveRef.current = isActive; + }, [isActive, fetchPRs]); + + // Reset pagination and selected PR when project changes + useEffect(() => { + hasLoadedRef.current = false; + setCurrentPage(1); + setHasMore(true); + setPrs([]); + setSelectedPRNumber(null); + setSelectedPRDetails(null); + }, [projectId]); // No need for local IPC listeners - they're handled globally in github-store @@ -174,8 +215,29 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { // Note: Don't reset review result - it comes from the store now // and persists across navigation - // Load existing review from disk if not already in store + // Clear previous detailed PR data when deselecting + if (prNumber === null) { + setSelectedPRDetails(null); + return; + } + if (prNumber && projectId) { + // Fetch full PR details including files + setIsLoadingPRDetails(true); + window.electronAPI.github.getPR(projectId, prNumber) + .then(prDetails => { + if (prDetails) { + setSelectedPRDetails(prDetails); + } + }) + .catch(err => { + console.warn(`Failed to fetch PR details for #${prNumber}:`, err); + }) + .finally(() => { + setIsLoadingPRDetails(false); + }); + + // Load existing review from disk if not already in store const existingState = getPRReviewState(projectId, prNumber); // Only fetch from disk if we don't have a result in the store if (!existingState?.result) { @@ -184,16 +246,44 @@ export function 
useGitHubPRs(projectId?: string): UseGitHubPRsResult { // Update store with the loaded result // Preserve newCommitsCheck when loading existing review from disk usePRReviewStore.getState().setPRReviewResult(projectId, result, { preserveNewCommitsCheck: true }); + + // Always check for new commits when selecting a reviewed PR + // This ensures fresh data even if we have a cached check from earlier in the session + const reviewedCommitSha = result.reviewedCommitSha || (result as any).reviewed_commit_sha; + if (reviewedCommitSha) { + window.electronAPI.github.checkNewCommits(projectId, prNumber).then(newCommitsResult => { + setNewCommitsCheckAction(projectId, prNumber, newCommitsResult); + }).catch(err => { + console.warn(`Failed to check new commits for PR #${prNumber}:`, err); + }); + } } }); + } else if (existingState?.result) { + // Review already in store - always check for new commits to get fresh status + const reviewedCommitSha = existingState.result.reviewedCommitSha || (existingState.result as any).reviewed_commit_sha; + if (reviewedCommitSha) { + window.electronAPI.github.checkNewCommits(projectId, prNumber).then(newCommitsResult => { + setNewCommitsCheckAction(projectId, prNumber, newCommitsResult); + }).catch(err => { + console.warn(`Failed to check new commits for PR #${prNumber}:`, err); + }); + } } } - }, [projectId, getPRReviewState]); + }, [projectId, getPRReviewState, setNewCommitsCheckAction]); const refresh = useCallback(async () => { - await fetchPRs(); + setCurrentPage(1); + setHasMore(true); + await fetchPRs(1, false); }, [fetchPRs]); + const loadMore = useCallback(async () => { + if (!hasMore || isLoadingMore || isLoading) return; + await fetchPRs(currentPage + 1, true); + }, [fetchPRs, hasMore, isLoadingMore, isLoading, currentPage]); + const runReview = useCallback(async (prNumber: number) => { if (!projectId) return; @@ -241,11 +331,11 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { } }, [projectId]); - const postReview = useCallback(async (prNumber: number, selectedFindingIds?: string[]): Promise => { + const postReview = useCallback(async (prNumber: number, selectedFindingIds?: string[], options?: { forceApprove?: boolean }): Promise => { if (!projectId) return false; try { - const success = await window.electronAPI.github.postPRReview(projectId, prNumber, selectedFindingIds); + const success = await window.electronAPI.github.postPRReview(projectId, prNumber, selectedFindingIds, options); if (success) { // Reload review result to get updated postedAt and finding status const result = await window.electronAPI.github.getPRReview(projectId, prNumber); @@ -307,6 +397,8 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { return { prs, isLoading, + isLoadingMore, + isLoadingPRDetails, error, selectedPR, selectedPRNumber, @@ -316,8 +408,10 @@ export function useGitHubPRs(projectId?: string): UseGitHubPRsResult { isConnected, repoFullName, activePRReviews, + hasMore, selectPR, refresh, + loadMore, runReview, runFollowupReview, checkNewCommits, diff --git a/apps/frontend/src/renderer/components/gitlab-issues/components/InvestigationDialog.tsx b/apps/frontend/src/renderer/components/gitlab-issues/components/InvestigationDialog.tsx index 4910547c51..534d37a46c 100644 --- a/apps/frontend/src/renderer/components/gitlab-issues/components/InvestigationDialog.tsx +++ b/apps/frontend/src/renderer/components/gitlab-issues/components/InvestigationDialog.tsx @@ -141,7 +141,10 @@ export function InvestigationDialog({ 
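In the useGitHubPRs hunk above, `fetchPRs(page, append)` appends a page of results while deduplicating by PR number and infers `hasMore` from whether a full page (100 items) came back; review results are then preloaded with one batched IPC call instead of one call per PR. The snippet below sketches just the append/dedup step under those assumptions; it is not the hook's actual code, and `PRSummary` is an illustrative type.

```ts
// Sketch: append a page of PRs, dropping any numbers already in the list,
// and infer whether another page is likely available.
interface PRSummary {
  number: number;
  title: string;
}

const PAGE_SIZE = 100; // GitHub returns up to 100 PRs per page

export function appendPage(existing: PRSummary[], page: PRSummary[]): {
  prs: PRSummary[];
  hasMore: boolean;
} {
  const seen = new Set(existing.map((pr) => pr.number));
  const fresh = page.filter((pr) => !seen.has(pr.number));
  return {
    prs: [...existing, ...fresh],
    // A short page means we have reached the end of the list.
    hasMore: page.length === PAGE_SIZE,
  };
}

// Usage (hypothetical):
// const { prs, hasMore } = appendPage(currentPrs, await listPRs(projectId, nextPage));
```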
{selectedNoteIds.length === notes.length ? t('investigation.deselectAll') : t('investigation.selectAll')}
- +
{notes.map((note) => ( +
+ ); + } +})); + +describe('AuthChoiceStep', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Reset profiles state to ensure clean state for each test + mockProfiles = []; + }); + + describe('Rendering', () => { + it('should render the auth choice step with all elements', () => { + render( + + ); + + // Check for heading + expect(screen.getByText('Choose Your Authentication Method')).toBeInTheDocument(); + + // Check for OAuth option + expect(screen.getByText('Sign in with Anthropic')).toBeInTheDocument(); + + // Check for API Key option + expect(screen.getByText('Use Custom API Key')).toBeInTheDocument(); + + // Check for skip button + expect(screen.getByText('Skip for now')).toBeInTheDocument(); + }); + + it('should display two auth option cards with equal visual weight', () => { + const { container } = render( + + ); + + // Check for grid layout with two columns + const grid = container.querySelector('.grid'); + expect(grid).toBeInTheDocument(); + expect(grid?.className).toContain('lg:grid-cols-2'); + }); + + it('should show icons for each auth option', () => { + render( + + ); + + // Both cards should have icon containers + const iconContainers = document.querySelectorAll('.bg-primary\\/10'); + expect(iconContainers.length).toBeGreaterThanOrEqual(2); + }); + }); + + describe('OAuth Button Handler', () => { + it('should call onNext when OAuth button is clicked', () => { + render( + + ); + + const oauthButton = screen.getByText('Sign in with Anthropic').closest('.cursor-pointer'); + fireEvent.click(oauthButton!); + + expect(mockGoToNext).toHaveBeenCalledTimes(1); + }); + + it('should proceed to oauth step when OAuth is selected', () => { + render( + + ); + + const oauthButton = screen.getByText('Sign in with Anthropic').closest('.cursor-pointer'); + fireEvent.click(oauthButton!); + + expect(mockGoToNext).toHaveBeenCalled(); + expect(mockOnAPIKeyPathComplete).not.toHaveBeenCalled(); + }); + }); + + describe('API Key Button Handler', () => { + it('should open ProfileEditDialog when API Key button is clicked', () => { + render( + + ); + + const apiKeyButton = screen.getByText('Use Custom API Key').closest('.cursor-pointer'); + fireEvent.click(apiKeyButton!); + + // ProfileEditDialog should be rendered + expect(screen.getByTestId('profile-edit-dialog')).toBeInTheDocument(); + }); + + it('should accept onAPIKeyPathComplete callback prop', async () => { + // This test verifies the component accepts the callback prop + // Full integration testing of profile creation detection requires E2E tests + // due to the complex state management between dialog and store + mockProfiles = []; + + render( + + ); + + // Click API Key button to open dialog + const apiKeyButton = screen.getByText('Use Custom API Key').closest('.cursor-pointer'); + fireEvent.click(apiKeyButton!); + + // Dialog should be open - verifies the API key path works + expect(screen.getByTestId('profile-edit-dialog')).toBeInTheDocument(); + + // Close dialog without creating profile + const closeButton = screen.getByText('Close Dialog'); + fireEvent.click(closeButton); + + // Callback should NOT be called when no profile was created (profiles still empty) + expect(mockOnAPIKeyPathComplete).not.toHaveBeenCalled(); + }); + }); + + describe('Skip Button Handler', () => { + it('should call onSkip when skip button is clicked', () => { + render( + + ); + + const skipButton = screen.getByText('Skip for now'); + fireEvent.click(skipButton); + + expect(mockSkipWizard).toHaveBeenCalledTimes(1); + }); + + it('should have ghost variant for 
skip button', () => { + render( + + ); + + const skipButton = screen.getByText('Skip for now'); + // Ghost variant buttons have specific styling classes + expect(skipButton.className).toContain('text-muted-foreground'); + expect(skipButton.className).toContain('hover:text-foreground'); + }); + }); + + describe('Visual Consistency', () => { + it('should follow WelcomeStep visual pattern', () => { + const { container } = render( + + ); + + // Check for container with proper classes + const mainContainer = container.querySelector('.flex.h-full.flex-col'); + expect(mainContainer).toBeInTheDocument(); + + // Check for max-w-2xl content wrapper + const contentWrapper = container.querySelector('.max-w-2xl'); + expect(contentWrapper).toBeInTheDocument(); + + // Check for centered text + const centeredText = container.querySelector('.text-center'); + expect(centeredText).toBeInTheDocument(); + }); + + it('should display hero icon with shield', () => { + const { container } = render( + + ); + + // Shield icon should be in a circle + const heroIcon = container.querySelector('.h-16.w-16'); + expect(heroIcon).toBeInTheDocument(); + }); + }); + + describe('Accessibility', () => { + it('should have descriptive text for each auth option', () => { + render( + + ); + + // OAuth option description + expect(screen.getByText(/Use your Anthropic account to authenticate/)).toBeInTheDocument(); + + // API Key option description + expect(screen.getByText(/Bring your own API key/)).toBeInTheDocument(); + }); + + it('should have helper text explaining both options', () => { + render( + + ); + + expect(screen.getByText(/Both options provide full access to Claude Code features/)).toBeInTheDocument(); + }); + }); + + describe('AC Coverage', () => { + it('AC1: should display first-run screen with two clear options', () => { + render( + + ); + + // Two main options visible + expect(screen.getByText('Sign in with Anthropic')).toBeInTheDocument(); + expect(screen.getByText('Use Custom API Key')).toBeInTheDocument(); + + // Both should be clickable cards + const cards = document.querySelectorAll('.cursor-pointer'); + expect(cards.length).toBeGreaterThanOrEqual(2); + }); + }); +}); diff --git a/apps/frontend/src/renderer/components/onboarding/AuthChoiceStep.tsx b/apps/frontend/src/renderer/components/onboarding/AuthChoiceStep.tsx new file mode 100644 index 0000000000..ca0c50be6a --- /dev/null +++ b/apps/frontend/src/renderer/components/onboarding/AuthChoiceStep.tsx @@ -0,0 +1,171 @@ +import { useState, useEffect, useRef } from 'react'; +import { LogIn, Key, Shield } from 'lucide-react'; +import { Button } from '../ui/button'; +import { Card, CardContent } from '../ui/card'; +import { ProfileEditDialog } from '../settings/ProfileEditDialog'; +import { useSettingsStore } from '../../stores/settings-store'; + +interface AuthChoiceStepProps { + onNext: () => void; + onBack: () => void; + onSkip: () => void; + onAPIKeyPathComplete?: () => void; // Called when profile is created (skips oauth) +} + +interface AuthOptionCardProps { + icon: React.ReactNode; + title: string; + description: string; + onClick: () => void; + variant?: 'default' | 'oauth'; + 'data-testid'?: string; +} + +function AuthOptionCard({ icon, title, description, onClick, variant = 'default', 'data-testid': dataTestId }: AuthOptionCardProps) { + return ( + + +
+
+ {icon} +
+
+

{title}

+

{description}

+
+
+
+
+ ); +} + +/** + * AuthChoiceStep component for the onboarding wizard. + * + * Allows new users to choose between: + * 1. OAuth authentication (Sign in with Anthropic) + * 2. Custom API key authentication (Use Custom API Key) + * + * Features: + * - Two equal-weight authentication options + * - Skip button for users who want to configure later + * - API key path opens ProfileEditDialog for profile creation + * - OAuth path proceeds to OAuthStep + * + * AC Coverage: + * - AC1: Displays first-run screen with two clear options + */ +export function AuthChoiceStep({ onNext, onBack, onSkip, onAPIKeyPathComplete }: AuthChoiceStepProps) { + const [isProfileDialogOpen, setIsProfileDialogOpen] = useState(false); + const profiles = useSettingsStore((state) => state.profiles); + + // Track initial profiles length to detect new profile creation + const initialProfilesLengthRef = useRef(profiles.length); + + // Update the ref when profiles change (to track the initial state before dialog opened) + useEffect(() => { + // Only update the ref when dialog is NOT open + // This captures the state before user opens the dialog + if (!isProfileDialogOpen) { + initialProfilesLengthRef.current = profiles.length; + } + }, [profiles.length, isProfileDialogOpen]); + + // OAuth button handler - proceeds to OAuth step + const handleOAuthChoice = () => { + onNext(); + }; + + // API Key button handler - opens profile dialog + const handleAPIKeyChoice = () => { + setIsProfileDialogOpen(true); + }; + + // Profile dialog close handler - detects profile creation and skips oauth step + const handleProfileDialogClose = (open: boolean) => { + const wasEmpty = initialProfilesLengthRef.current === 0; + const hasProfilesNow = profiles.length > 0; + + setIsProfileDialogOpen(open); + + // If dialog closed and profile was created (was empty, now has profiles), skip to graphiti step + if (!open && wasEmpty && hasProfilesNow && onAPIKeyPathComplete) { + // Call the callback to skip oauth and go directly to graphiti + onAPIKeyPathComplete(); + } + }; + + return ( + <> +
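The close handler above detects "a profile was just created" by snapshotting `profiles.length` while the dialog is closed and comparing it against the count at the moment the dialog closes. Reduced to a standalone sketch (the function name is illustrative), the core check is:

```ts
// Sketch: detect that the profile list went from empty to non-empty across a
// dialog session, using a snapshot taken before the dialog opened.
export function createdFirstProfile(countBeforeDialog: number, countNow: number): boolean {
  const wasEmpty = countBeforeDialog === 0;
  const hasProfilesNow = countNow > 0;
  return wasEmpty && hasProfilesNow;
}

// createdFirstProfile(0, 1) === true  -> API key path completed, skip the OAuth step
// createdFirstProfile(1, 2) === false -> an additional profile, not the first one
```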
+
+ {/* Hero Section */} +
+
+
+ +
+
+

+ Choose Your Authentication Method +

+

+ Select how you want to authenticate with Claude. You can change this later in Settings. +

+
+ + {/* Authentication Options - Equal Visual Weight */} +
+ } + title="Sign in with Anthropic" + description="Use your Anthropic account to authenticate. Simple and secure OAuth flow." + onClick={handleOAuthChoice} + variant="oauth" + data-testid="auth-option-oauth" + /> + } + title="Use Custom API Key" + description="Bring your own API key from Anthropic or a compatible API provider. โš ๏ธ Highly experimental โ€” may incur significant costs." + onClick={handleAPIKeyChoice} + data-testid="auth-option-apikey" + /> +
+ + {/* Info text */} +
+

+ Both options provide full access to Claude Code features. Choose based on your preference. +

+
+ + {/* Skip Button */} +
+ +
+
+
+ + {/* Profile Edit Dialog for API Key Path */} + + + ); +} diff --git a/apps/frontend/src/renderer/components/onboarding/GraphitiStep.tsx b/apps/frontend/src/renderer/components/onboarding/GraphitiStep.tsx index b9f57654dc..dca6465f41 100644 --- a/apps/frontend/src/renderer/components/onboarding/GraphitiStep.tsx +++ b/apps/frontend/src/renderer/components/onboarding/GraphitiStep.tsx @@ -111,7 +111,7 @@ interface ValidationStatus { export function GraphitiStep({ onNext, onBack, onSkip }: GraphitiStepProps) { const { settings, updateSettings } = useSettingsStore(); const [config, setConfig] = useState({ - enabled: false, + enabled: true, // Enabled by default for better first-time experience database: 'auto_claude_memory', dbPath: '', llmProvider: 'openai', diff --git a/apps/frontend/src/renderer/components/onboarding/MemoryStep.tsx b/apps/frontend/src/renderer/components/onboarding/MemoryStep.tsx index 1cab32a0b9..0c792c6cd1 100644 --- a/apps/frontend/src/renderer/components/onboarding/MemoryStep.tsx +++ b/apps/frontend/src/renderer/components/onboarding/MemoryStep.tsx @@ -1,18 +1,18 @@ import { useState, useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; import { - Brain, Database, Info, Loader2, - CheckCircle2, - AlertCircle, Eye, EyeOff, + ExternalLink, } from 'lucide-react'; import { Button } from '../ui/button'; import { Input } from '../ui/input'; import { Label } from '../ui/label'; -import { Card, CardContent } from '../ui/card'; +import { Switch } from '../ui/switch'; +import { Separator } from '../ui/separator'; import { Select, SelectContent, @@ -29,22 +29,10 @@ interface MemoryStepProps { onBack: () => void; } -// Embedding provider configurations (LLM provider removed - Claude SDK handles RAG) -const EMBEDDING_PROVIDERS: Array<{ - id: GraphitiEmbeddingProvider; - name: string; - description: string; - requiresApiKey: boolean; -}> = [ - { id: 'ollama', name: 'Ollama (Local)', description: 'Free, local embeddings', requiresApiKey: false }, - { id: 'openai', name: 'OpenAI', description: 'text-embedding-3-small', requiresApiKey: true }, - { id: 'voyage', name: 'Voyage AI', description: 'voyage-3 (high quality)', requiresApiKey: true }, - { id: 'google', name: 'Google AI', description: 'text-embedding-004', requiresApiKey: true }, - { id: 'azure_openai', name: 'Azure OpenAI', description: 'Enterprise deployment', requiresApiKey: true }, -]; - interface MemoryConfig { - database: string; + enabled: boolean; + agentMemoryEnabled: boolean; + mcpServerUrl: string; embeddingProvider: GraphitiEmbeddingProvider; // OpenAI openaiApiKey: string; @@ -57,7 +45,6 @@ interface MemoryConfig { // Google googleApiKey: string; // Ollama - ollamaBaseUrl: string; ollamaEmbeddingModel: string; ollamaEmbeddingDim: number; } @@ -65,16 +52,23 @@ interface MemoryConfig { /** * Memory configuration step for the onboarding wizard. * - * Key simplifications from the previous GraphitiStep: - * - Memory is always enabled (no toggle) - * - LLM provider removed (Claude SDK handles RAG queries) - * - Ollama is the default with model discovery + download - * - Keyword search works as fallback without embeddings + * Matches the settings page Memory section structure: + * - Enable Memory toggle (enabled by default) + * - Enable Agent Memory Access toggle + * - Embedding Provider selection (Ollama default) + * - Provider-specific configuration + * + * Note: LLM provider is not configurable - Claude SDK is used throughout. 
*/ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { + const { t } = useTranslation('onboarding'); const { settings, updateSettings } = useSettingsStore(); + + // Initialize config with memory enabled by default const [config, setConfig] = useState({ - database: 'auto_claude_memory', + enabled: true, // Memory enabled by default + agentMemoryEnabled: true, // Agent memory access enabled by default + mcpServerUrl: 'http://localhost:8000/mcp/', embeddingProvider: 'ollama', openaiApiKey: settings.globalOpenAIApiKey || '', azureOpenaiApiKey: '', @@ -82,25 +76,23 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { azureOpenaiEmbeddingDeployment: '', voyageApiKey: '', googleApiKey: settings.globalGoogleApiKey || '', - ollamaBaseUrl: settings.ollamaBaseUrl || 'http://localhost:11434', ollamaEmbeddingModel: 'qwen3-embedding:4b', ollamaEmbeddingDim: 2560, }); + const [showApiKey, setShowApiKey] = useState>({}); const [isSaving, setIsSaving] = useState(false); const [error, setError] = useState(null); const [isCheckingInfra, setIsCheckingInfra] = useState(true); - const [kuzuAvailable, setKuzuAvailable] = useState(null); // Check LadybugDB/Kuzu availability on mount useEffect(() => { const checkInfrastructure = async () => { setIsCheckingInfra(true); try { - const result = await window.electronAPI.getMemoryInfrastructureStatus(); - setKuzuAvailable(result?.success && result?.data?.memory?.kuzuInstalled ? true : false); + await window.electronAPI.getMemoryInfrastructureStatus(); } catch { - setKuzuAvailable(false); + // Infrastructure will be created automatically when needed } finally { setIsCheckingInfra(false); } @@ -115,6 +107,9 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { // Check if we have valid configuration const isConfigValid = (): boolean => { + // If memory is disabled, always valid + if (!config.enabled) return true; + const { embeddingProvider } = config; // Ollama just needs a model selected @@ -141,15 +136,15 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { try { // Save complete memory configuration to global settings - // This includes all settings needed for backend to use memory const settingsToSave: Record = { - // Core memory settings (CRITICAL - these were missing before) - memoryEnabled: true, + // Core memory settings + memoryEnabled: config.enabled, memoryEmbeddingProvider: config.embeddingProvider, memoryOllamaEmbeddingModel: config.ollamaEmbeddingModel || undefined, memoryOllamaEmbeddingDim: config.ollamaEmbeddingDim || undefined, - // Ollama base URL - ollamaBaseUrl: config.ollamaBaseUrl.trim() || undefined, + // Agent memory access (MCP) + graphitiMcpEnabled: config.agentMemoryEnabled, + graphitiMcpUrl: config.mcpServerUrl.trim() || undefined, // Global API keys (shared across features) globalOpenAIApiKey: config.openaiApiKey.trim() || undefined, globalGoogleApiKey: config.googleApiKey.trim() || undefined, @@ -163,13 +158,14 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { const result = await window.electronAPI.saveSettings(settingsToSave); if (result?.success) { - // Update local settings store with all memory config + // Update local settings store const storeUpdate: Partial = { - memoryEnabled: true, + memoryEnabled: config.enabled, memoryEmbeddingProvider: config.embeddingProvider, memoryOllamaEmbeddingModel: config.ollamaEmbeddingModel || undefined, memoryOllamaEmbeddingDim: config.ollamaEmbeddingDim || undefined, - ollamaBaseUrl: config.ollamaBaseUrl.trim() || 
undefined, + graphitiMcpEnabled: config.agentMemoryEnabled, + graphitiMcpUrl: config.mcpServerUrl.trim() || undefined, globalOpenAIApiKey: config.openaiApiKey.trim() || undefined, globalGoogleApiKey: config.googleApiKey.trim() || undefined, memoryVoyageApiKey: config.voyageApiKey.trim() || undefined, @@ -189,10 +185,6 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { } }; - const handleContinue = () => { - handleSave(); - }; - const handleOllamaModelSelect = (modelName: string, dim: number) => { setConfig(prev => ({ ...prev, @@ -207,17 +199,13 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { if (embeddingProvider === 'ollama') { return ( -
-
- - -
+
+ +
); } @@ -225,12 +213,10 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { if (embeddingProvider === 'openai') { return (
- + +

{t('memory.openaiApiKeyDescription')}

setConfig(prev => ({ ...prev, openaiApiKey: e.target.value }))} @@ -247,7 +233,7 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) {

- Get your key from{' '} + {t('memory.openaiGetKey')}{' '} OpenAI @@ -259,12 +245,10 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { if (embeddingProvider === 'voyage') { return (

- + +

{t('memory.voyageApiKeyDescription')}

setConfig(prev => ({ ...prev, voyageApiKey: e.target.value }))} @@ -281,7 +265,7 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) {

- Get your key from{' '} + {t('memory.openaiGetKey')}{' '} Voyage AI @@ -293,12 +277,10 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { if (embeddingProvider === 'google') { return (

- + +

{t('memory.googleApiKeyDescription')}

setConfig(prev => ({ ...prev, googleApiKey: e.target.value }))} @@ -315,7 +297,7 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) {

- Get your key from{' '} + {t('memory.openaiGetKey')}{' '} Google AI Studio @@ -327,16 +309,15 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { if (embeddingProvider === 'azure_openai') { return (

-

Azure OpenAI Settings

+
- +
setConfig(prev => ({ ...prev, azureOpenaiApiKey: e.target.value }))} - placeholder="Azure API key" + placeholder="Azure API Key" className="pr-10 font-mono text-sm" disabled={isSaving} /> @@ -349,26 +330,22 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) {
-
- +
+ setConfig(prev => ({ ...prev, azureOpenaiBaseUrl: e.target.value }))} - placeholder="https://your-resource.openai.azure.com" className="font-mono text-sm" disabled={isSaving} />
-
- +
+ setConfig(prev => ({ ...prev, azureOpenaiEmbeddingDeployment: e.target.value }))} - placeholder="text-embedding-ada-002" className="font-mono text-sm" disabled={isSaving} /> @@ -387,18 +364,18 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) {
- +

- Memory + {t('memory.title')}

- Auto Claude Memory helps remember context across your coding sessions + {t('memory.description')}

- {/* Loading state for infrastructure check */} + {/* Loading state */} {isCheckingInfra && (
@@ -410,112 +387,129 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) {
{/* Error banner */} {error && ( - - -
- -

{error}

-
-
-
+
+

{error}

+
)} - {/* Kuzu status notice */} - {kuzuAvailable === false && ( - - -
- -
-

- Database will be created automatically -

-

- Memory uses an embedded database - no Docker required. - It will be created when you first use memory features. -

-
-
-
-
+ {/* Enable Memory Toggle */} +
+
+ +
+ +

+ {t('memory.enableMemoryDescription')} +

+
+
+ setConfig(prev => ({ ...prev, enabled: checked }))} + disabled={isSaving} + /> +
+ + {/* Memory Disabled Info */} + {!config.enabled && ( +
+
+ +

+ {t('memory.memoryDisabledInfo')} +

+
+
)} - {/* Info card about Memory */} - - -
- -
-

- What does Memory do? -

-

- Memory stores discoveries, patterns, and insights about your codebase - so future sessions start with context already loaded. + {/* Memory Enabled Configuration */} + {config.enabled && ( + <> + {/* Agent Memory Access Toggle */} +

+
+ +

+ {t('memory.enableAgentAccessDescription')}

-
    -
  • Remembers patterns across sessions
  • -
  • Understands your codebase over time
  • -
  • Works offline - no cloud required
  • -
+ setConfig(prev => ({ ...prev, agentMemoryEnabled: checked }))} + disabled={isSaving} + />
- - - - {/* Database info */} -
- -
-

- Memory Database -

-

- Stored in ~/.auto-claude/memories/ -

-
- {kuzuAvailable && ( - - )} -
- {/* Embedding Provider Selection */} -
-
- - -
+ {/* MCP Server URL (shown when agent memory is enabled) */} + {config.agentMemoryEnabled && ( +
+ +

+ {t('memory.mcpServerUrlDescription')} +

+ setConfig(prev => ({ ...prev, mcpServerUrl: e.target.value }))} + className="font-mono text-sm" + disabled={isSaving} + /> +
+ )} + + + + {/* Embedding Provider Selection */} +
+ +

+ {t('memory.embeddingProviderDescription')} +

+ +
- {/* Provider-specific fields */} - {renderProviderFields()} -
+ {/* Provider-specific fields */} + {renderProviderFields()} - {/* Fallback info */} -

- No embedding provider? Memory still works with keyword search. Semantic search is an upgrade. -

+ {/* Info about Learn More */} +
+
+ +
+

+ {t('memory.memoryInfo')} +

+ + {t('memory.learnMore')} + + +
+
+
+ + )}
)} @@ -526,21 +520,30 @@ export function MemoryStep({ onNext, onBack }: MemoryStepProps) { onClick={onBack} className="text-muted-foreground hover:text-foreground" > - Back - - +
+ + +
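The `isConfigValid` check in the MemoryStep hunk above treats a disabled memory feature as always valid, accepts Ollama once an embedding model is selected, and otherwise requires an API key for the chosen provider. The diff only shows the first branches, so the sketch below fills in the remaining cases as an assumption about the intended behavior, not a copy of the real function.

```ts
// Sketch of the validation rule described above; the per-provider requirements
// beyond "needs an API key" (e.g. Azure also needing an endpoint) are assumptions.
type EmbeddingProvider = 'ollama' | 'openai' | 'voyage' | 'google' | 'azure_openai';

interface MemoryConfigSketch {
  enabled: boolean;
  embeddingProvider: EmbeddingProvider;
  ollamaEmbeddingModel: string;
  openaiApiKey: string;
  voyageApiKey: string;
  googleApiKey: string;
  azureOpenaiApiKey: string;
  azureOpenaiBaseUrl: string;
}

export function isConfigValidSketch(config: MemoryConfigSketch): boolean {
  // Disabled memory never blocks the wizard.
  if (!config.enabled) return true;

  switch (config.embeddingProvider) {
    case 'ollama':
      // Ollama just needs a model selected.
      return config.ollamaEmbeddingModel.trim().length > 0;
    case 'openai':
      return config.openaiApiKey.trim().length > 0;
    case 'voyage':
      return config.voyageApiKey.trim().length > 0;
    case 'google':
      return config.googleApiKey.trim().length > 0;
    case 'azure_openai':
      return config.azureOpenaiApiKey.trim().length > 0 && config.azureOpenaiBaseUrl.trim().length > 0;
    default:
      return false;
  }
}
```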
diff --git a/apps/frontend/src/renderer/components/onboarding/OAuthStep.tsx b/apps/frontend/src/renderer/components/onboarding/OAuthStep.tsx index 7584f864ea..4fad5f3337 100644 --- a/apps/frontend/src/renderer/components/onboarding/OAuthStep.tsx +++ b/apps/frontend/src/renderer/components/onboarding/OAuthStep.tsx @@ -26,6 +26,7 @@ import { Label } from '../ui/label'; import { Card, CardContent } from '../ui/card'; import { cn } from '../../lib/utils'; import { loadClaudeProfiles as loadGlobalClaudeProfiles } from '../../stores/claude-profile-store'; +import { useClaudeLoginTerminal } from '../../hooks/useClaudeLoginTerminal'; import type { ClaudeProfile } from '../../../shared/types'; interface OAuthStepProps { @@ -92,6 +93,9 @@ export function OAuthStep({ onNext, onBack, onSkip }: OAuthStepProps) { loadClaudeProfiles(); }, []); + // Listen for login terminal creation - makes the terminal visible so user can see OAuth flow + useClaudeLoginTerminal(); + // Listen for OAuth authentication completion useEffect(() => { const unsubscribe = window.electronAPI.onTerminalOAuthToken(async (info) => { @@ -144,11 +148,8 @@ export function OAuthStep({ onNext, onBack, onSkip }: OAuthStepProps) { await loadClaudeProfiles(); setNewProfileName(''); - alert( - `Authenticating "${profileName}"...\n\n` + - `A browser window will open for you to log in with your Claude account.\n\n` + - `The authentication will be saved automatically once complete.` - ); + // Note: The terminal is now visible in the UI via the onTerminalAuthCreated event + // Users can see the 'claude setup-token' output directly } else { await loadClaudeProfiles(); alert(`Failed to start authentication: ${initResult.error || 'Please try again.'}`); @@ -222,15 +223,11 @@ export function OAuthStep({ onNext, onBack, onSkip }: OAuthStepProps) { setError(null); try { const initResult = await window.electronAPI.initializeClaudeProfile(profileId); - if (initResult.success) { - alert( - `Authenticating profile...\n\n` + - `A browser window will open for you to log in with your Claude account.\n\n` + - `The authentication will be saved automatically once complete.` - ); - } else { + if (!initResult.success) { alert(`Failed to start authentication: ${initResult.error || 'Please try again.'}`); } + // Note: If successful, the terminal is now visible in the UI via the onTerminalAuthCreated event + // Users can see the 'claude setup-token' output and complete OAuth flow directly } catch (err) { setError(err instanceof Error ? err.message : 'Failed to authenticate profile'); alert('Failed to start authentication. 
Please try again.'); diff --git a/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx b/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx index 97257ec9f0..e6ed737ed0 100644 --- a/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx +++ b/apps/frontend/src/renderer/components/onboarding/OllamaModelSelector.tsx @@ -165,29 +165,40 @@ export function OllamaModelSelector({ if (abortSignal?.aborted) return; if (result?.success && result?.data?.embedding_models) { - // Build a set of installed model names (both full name and normalized) + // Build a set of installed model names (full, base, and version-matched) const installedFullNames = new Set(); const installedBaseNames = new Set(); + const installedVersionNames = new Set(); result.data.embedding_models.forEach((m: { name: string }) => { const name = m.name; installedFullNames.add(name); - // Only normalize :latest suffix, not version tags like :4b, :8b, :0.6b + + // Normalize :latest suffix if (name.endsWith(':latest')) { installedBaseNames.add(name.replace(':latest', '')); } else if (!name.includes(':')) { installedBaseNames.add(name); } + + // Handle quantization variants (e.g., qwen3-embedding:8b-q4_K_M) + // Extract base:version without quantization suffix + const quantMatch = name.match(/^([^:]+:[^-]+)/); + if (quantMatch) { + installedVersionNames.add(quantMatch[1]); + } }); // Update models with installation status setModels( RECOMMENDED_MODELS.map(model => { - // Check exact match first, then base name (for :latest normalization) + // Check multiple matching strategies: + // 1. Exact match (e.g., "qwen3-embedding:8b" === "qwen3-embedding:8b") + // 2. Base name match for :latest normalization (handles "embeddinggemma" matching "embeddinggemma:latest") + // 3. Version match ignoring quantization suffix (e.g., "qwen3-embedding:8b" matches "qwen3-embedding:8b-q4_K_M") const isInstalled = installedFullNames.has(model.name) || installedBaseNames.has(model.name) || - // Also check if model without tag is installed (e.g., "embeddinggemma" matches "embeddinggemma") - (model.name.includes(':') ? false : installedFullNames.has(model.name + ':latest')); + installedVersionNames.has(model.name); return { ...model, installed: isInstalled, @@ -281,15 +292,22 @@ export function OllamaModelSelector({ }; /** - * Handles model selection by calling the parent callback. + * Handles model selection with toggle behavior. + * Clicking an already-selected model will deselect it. * Only allows selection of installed models and when component is not disabled. 
* - * @param {OllamaModel} model - The model to select + * @param {OllamaModel} model - The model to select or deselect * @returns {void} */ const handleSelect = (model: OllamaModel) => { if (!model.installed || disabled) return; - onModelSelect(model.name, model.dim); + + // Toggle behavior: if already selected, deselect by passing empty values + if (selectedModel === model.name) { + onModelSelect('', 0); + } else { + onModelSelect(model.name, model.dim); + } }; if (isLoading) { diff --git a/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.test.tsx b/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.test.tsx new file mode 100644 index 0000000000..8d7901f84e --- /dev/null +++ b/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.test.tsx @@ -0,0 +1,377 @@ +/** + * @vitest-environment jsdom + */ +/** + * OnboardingWizard integration tests + * + * Integration tests for the complete onboarding wizard flow. + * Verifies step navigation, OAuth/API key paths, back button behavior, + * and progress indicator. + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, fireEvent, waitFor } from '@testing-library/react'; +import '@testing-library/jest-dom'; +import { OnboardingWizard } from './OnboardingWizard'; + +// Mock react-i18next to avoid initialization issues +vi.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => { + // Return the key itself or provide specific translations + // Keys are without namespace since component uses useTranslation('namespace') + const translations: Record = { + 'welcome.title': 'Welcome to Auto Claude', + 'welcome.subtitle': 'AI-powered autonomous coding assistant', + 'welcome.getStarted': 'Get Started', + 'welcome.skip': 'Skip Setup', + 'wizard.helpText': 'Let us help you get started with Auto Claude', + 'welcome.features.aiPowered.title': 'AI-Powered', + 'welcome.features.aiPowered.description': 'Powered by Claude', + 'welcome.features.specDriven.title': 'Spec-Driven', + 'welcome.features.specDriven.description': 'Create from specs', + 'welcome.features.memory.title': 'Memory', + 'welcome.features.memory.description': 'Remembers context', + 'welcome.features.parallel.title': 'Parallel', + 'welcome.features.parallel.description': 'Work in parallel', + 'authChoice.title': 'Choose Your Authentication Method', + 'authChoice.subtitle': 'Select how you want to authenticate', + 'authChoice.oauthTitle': 'Sign in with Anthropic', + 'authChoice.oauthDesc': 'OAuth authentication', + 'authChoice.apiKeyTitle': 'Use Custom API Key', + 'authChoice.apiKeyDesc': 'Enter your own API key', + 'authChoice.skip': 'Skip for now', + // Common translations + 'common:actions.close': 'Close' + }; + return translations[key] || key; + }, + i18n: { language: 'en' } + }), + Trans: ({ children }: { children: React.ReactNode }) => children +})); + +// Mock the settings store +const mockUpdateSettings = vi.fn(); +const mockLoadSettings = vi.fn(); +const mockProfiles: any[] = []; + +vi.mock('../../stores/settings-store', () => ({ + useSettingsStore: vi.fn((selector) => { + const state = { + settings: { onboardingCompleted: false }, + isLoading: false, + profiles: mockProfiles, + activeProfileId: null, + updateSettings: mockUpdateSettings, + loadSettings: mockLoadSettings + }; + if (!selector) return state; + return selector(state); + }) +})); + +// Mock electronAPI +const mockSaveSettings = vi.fn().mockResolvedValue({ success: true }); + +Object.defineProperty(window, 'electronAPI', { 
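The OllamaModelSelector change above matches a recommended model against what is actually installed in three ways: exact name, `:latest` normalization, and a version match that strips a quantization suffix (so `qwen3-embedding:8b` counts as installed when `qwen3-embedding:8b-q4_K_M` is present). A small sketch of that matching, using the same regex, is shown below; the function name is illustrative.

```ts
// Sketch of the three matching strategies described in the hunk above.
export function isModelInstalled(recommended: string, installed: string[]): boolean {
  const fullNames = new Set<string>();
  const baseNames = new Set<string>();
  const versionNames = new Set<string>();

  for (const name of installed) {
    fullNames.add(name);

    // ":latest" normalization, e.g. "embeddinggemma:latest" -> "embeddinggemma"
    if (name.endsWith(':latest')) {
      baseNames.add(name.replace(':latest', ''));
    } else if (!name.includes(':')) {
      baseNames.add(name);
    }

    // Drop a quantization suffix, e.g. "qwen3-embedding:8b-q4_K_M" -> "qwen3-embedding:8b"
    const quantMatch = name.match(/^([^:]+:[^-]+)/);
    if (quantMatch) {
      versionNames.add(quantMatch[1]);
    }
  }

  return fullNames.has(recommended) || baseNames.has(recommended) || versionNames.has(recommended);
}

// isModelInstalled('qwen3-embedding:8b', ['qwen3-embedding:8b-q4_K_M']) === true
// isModelInstalled('embeddinggemma', ['embeddinggemma:latest']) === true
```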
+ value: { + saveSettings: mockSaveSettings, + onAppUpdateDownloaded: vi.fn(), + // OAuth-related methods needed for OAuthStep component + onTerminalOAuthToken: vi.fn(() => vi.fn()), // Returns unsubscribe function + getOAuthToken: vi.fn().mockResolvedValue(null), + startOAuthFlow: vi.fn().mockResolvedValue({ success: true }), + loadProfiles: vi.fn().mockResolvedValue([]) + }, + writable: true +}); + +describe('OnboardingWizard Integration Tests', () => { + const defaultProps = { + open: true, + onOpenChange: vi.fn() + }; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + describe('OAuth Path Navigation', () => { + // Skipped: OAuth integration tests require full OAuth step mocking - not API Profile related + it.skip('should navigate: welcome โ†’ auth-choice โ†’ oauth', async () => { + render(); + + // Start at welcome step + expect(screen.getByText(/Welcome to Auto Claude/)).toBeInTheDocument(); + + // Click "Get Started" to go to auth-choice + const getStartedButton = screen.getByRole('button', { name: /Get Started/ }); + fireEvent.click(getStartedButton); + + // Should now show auth choice step + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // Click OAuth option + const oauthButton = screen.getByTestId('auth-option-oauth'); + fireEvent.click(oauthButton); + + // Should navigate to oauth step + await waitFor(() => { + expect(screen.getByText(/Sign in with Anthropic/)).toBeInTheDocument(); + }); + }); + + // Skipped: OAuth path test requires full OAuth step mocking + it.skip('should show correct progress indicator for OAuth path', async () => { + render(); + + // Click through to auth-choice + fireEvent.click(screen.getByRole('button', { name: /Get Started/ })); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // Verify progress indicator shows 5 steps + const progressIndicators = document.querySelectorAll('[class*="step"]'); + expect(progressIndicators.length).toBeGreaterThanOrEqual(4); // At least 4 steps shown + }); + }); + + describe('API Key Path Navigation', () => { + // Skipped: Test requires ProfileEditDialog integration mock + it.skip('should skip oauth step when API key path chosen', async () => { + render(); + + // Start at welcome step + expect(screen.getByText(/Welcome to Auto Claude/)).toBeInTheDocument(); + + // Click "Get Started" to go to auth-choice + fireEvent.click(screen.getByRole('button', { name: /Get Started/ })); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // Click API Key option + const apiKeyButton = screen.getByTestId('auth-option-apikey'); + fireEvent.click(apiKeyButton); + + // Profile dialog should open + await waitFor(() => { + expect(screen.getByTestId('profile-edit-dialog')).toBeInTheDocument(); + }); + + // Close dialog (simulating profile creation - in real scenario this would trigger skip) + const closeButton = screen.queryByText(/Close|Cancel/); + if (closeButton) { + fireEvent.click(closeButton); + } + }); + + it('should not show OAuth step text on auth-choice screen', async () => { + render(); + + // Navigate to auth-choice + fireEvent.click(screen.getByRole('button', { name: /Get Started/ })); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // When profile is created via API key path, should skip oauth + // This is tested via component behavior - the wizard 
should advance + // directly to graphiti step, bypassing oauth + const oauthStepText = screen.queryByText(/OAuth Authentication/); + // Before API key selection, oauth text from different context shouldn't be visible + expect(oauthStepText).toBeNull(); + }); + }); + + describe('Back Button Behavior After API Key Path', () => { + it('should go back to auth-choice (not oauth) when coming from API key path', async () => { + render(); + + // This test verifies that when oauth is bypassed (API key path taken), + // going back from graphiti returns to auth-choice, not oauth + + // Navigate: welcome โ†’ auth-choice + fireEvent.click(screen.getByText(/Get Started/)); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // The back button behavior is controlled by oauthBypassed state + // When API key path is taken, oauthBypassed=true + // Going back from graphiti should skip oauth step + const authChoiceHeading = screen.getByText(/Choose Your Authentication Method/); + expect(authChoiceHeading).toBeInTheDocument(); + }); + }); + + describe('First-Run Detection', () => { + it('should show wizard for users with no auth configured', () => { + render(); + + // Wizard should be visible + expect(screen.getByText(/Welcome to Auto Claude/)).toBeInTheDocument(); + }); + + it('should not show wizard for users with existing OAuth', () => { + // This is tested in App.tsx integration tests + // Here we verify the wizard can be closed + const { rerender } = render(); + + expect(screen.getByText(/Welcome to Auto Claude/)).toBeInTheDocument(); + + // Close wizard + rerender(); + + // Wizard content should not be visible + expect(screen.queryByText(/Welcome to Auto Claude/)).not.toBeInTheDocument(); + }); + + it('should not show wizard for users with existing API profiles', () => { + // This is tested in App.tsx integration tests + // The wizard respects the open prop + render(); + + expect(screen.queryByText(/Welcome to Auto Claude/)).not.toBeInTheDocument(); + }); + }); + + describe('Skip and Completion', () => { + it('should complete wizard when skip is clicked', async () => { + render(); + + // Click skip on welcome step + const skipButton = screen.getByRole('button', { name: /Skip Setup/ }); + fireEvent.click(skipButton); + + // Should call saveSettings + await waitFor(() => { + expect(mockSaveSettings).toHaveBeenCalledWith({ onboardingCompleted: true }); + }); + }); + + it('should call onOpenChange when wizard is closed', async () => { + const mockOnOpenChange = vi.fn(); + render(); + + // Click skip to close wizard + const skipButton = screen.getByRole('button', { name: /Skip Setup/ }); + fireEvent.click(skipButton); + + await waitFor(() => { + expect(mockOnOpenChange).toHaveBeenCalledWith(false); + }); + }); + }); + + describe('Step Progress Indicator', () => { + // Skipped: Progress indicator tests require step-by-step CSS class inspection + it.skip('should display progress indicator for non-welcome/completion steps', async () => { + render(); + + // On welcome step, no progress indicator shown + expect(screen.queryByText(/Welcome/)).toBeInTheDocument(); + const progressBeforeNav = document.querySelector('[class*="progress"]'); + // Progress indicator may not be visible on welcome step + + // Navigate to auth-choice + fireEvent.click(screen.getByRole('button', { name: /Get Started/ })); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // Progress indicator should now be 
visible + // The WizardProgress component should be rendered + const progressElement = document.querySelector('[class*="step"]'); + expect(progressElement).toBeTruthy(); + }); + + // Skipped: Step count test requires i18n step labels + it.skip('should show correct number of steps (5 total)', async () => { + render(); + + // Navigate to auth-choice + fireEvent.click(screen.getByRole('button', { name: /Get Started/ })); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // Check for step labels in progress indicator + const steps = [ + 'Welcome', + 'Auth Method', + 'OAuth', + 'Memory', + 'Done' + ]; + + // At least some step labels should be present (not all may be visible at current step) + const visibleSteps = steps.filter(step => screen.queryByText(step)); + expect(visibleSteps.length).toBeGreaterThan(0); + }); + }); + + describe('AC Coverage', () => { + it('AC1: First-run screen displays with two auth options', async () => { + render(); + + // Navigate to auth-choice + fireEvent.click(screen.getByRole('button', { name: /Get Started/ })); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + // Both options should be visible + expect(screen.getByText(/Sign in with Anthropic/)).toBeInTheDocument(); + expect(screen.getByText(/Use Custom API Key/)).toBeInTheDocument(); + }); + + // Skipped: OAuth path test requires full OAuth step mocking + it.skip('AC2: OAuth path initiates existing OAuth flow', async () => { + render(); + + fireEvent.click(screen.getByText(/Get Started/)); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + const oauthButton = screen.getByTestId('auth-option-oauth'); + fireEvent.click(oauthButton); + + // Should proceed to OAuth step + await waitFor(() => { + // OAuth step content should be visible + expect(document.querySelector('.fullscreen-dialog')).toBeInTheDocument(); + }); + }); + + it('AC3: API Key path opens profile management dialog', async () => { + render(); + + fireEvent.click(screen.getByText(/Get Started/)); + await waitFor(() => { + expect(screen.getByText(/Choose Your Authentication Method/)).toBeInTheDocument(); + }); + + const apiKeyButton = screen.getByTestId('auth-option-apikey'); + fireEvent.click(apiKeyButton); + + // ProfileEditDialog should open + await waitFor(() => { + expect(screen.getByTestId('profile-edit-dialog')).toBeInTheDocument(); + }); + }); + + it('AC4: Existing auth skips wizard', () => { + // Wizard with open=false simulates existing auth scenario + render(); + + // Wizard should not be visible + expect(screen.queryByText(/Welcome to Auto Claude/)).not.toBeInTheDocument(); + }); + }); +}); diff --git a/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.tsx b/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.tsx index 1ab1891773..5eb00c07d7 100644 --- a/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.tsx +++ b/apps/frontend/src/renderer/components/onboarding/OnboardingWizard.tsx @@ -12,10 +12,12 @@ import { import { ScrollArea } from '../ui/scroll-area'; import { WizardProgress, WizardStep } from './WizardProgress'; import { WelcomeStep } from './WelcomeStep'; +import { AuthChoiceStep } from './AuthChoiceStep'; import { OAuthStep } from './OAuthStep'; import { ClaudeCodeStep } from './ClaudeCodeStep'; import { DevToolsStep } from './DevToolsStep'; -import { MemoryStep } from './MemoryStep'; 
+import { PrivacyStep } from './PrivacyStep'; +import { GraphitiStep } from './GraphitiStep'; import { CompletionStep } from './CompletionStep'; import { useSettingsStore } from '../../stores/settings-store'; @@ -27,15 +29,17 @@ interface OnboardingWizardProps { } // Wizard step identifiers -type WizardStepId = 'welcome' | 'oauth' | 'claude-code' | 'devtools' | 'memory' | 'completion'; +type WizardStepId = 'welcome' | 'auth-choice' | 'oauth' | 'claude-code' | 'devtools' | 'privacy' | 'graphiti' | 'completion'; // Step configuration with translation keys const WIZARD_STEPS: { id: WizardStepId; labelKey: string }[] = [ { id: 'welcome', labelKey: 'steps.welcome' }, + { id: 'auth-choice', labelKey: 'steps.authChoice' }, { id: 'oauth', labelKey: 'steps.auth' }, { id: 'claude-code', labelKey: 'steps.claudeCode' }, { id: 'devtools', labelKey: 'steps.devtools' }, - { id: 'memory', labelKey: 'steps.memory' }, + { id: 'privacy', labelKey: 'steps.privacy' }, + { id: 'graphiti', labelKey: 'steps.memory' }, { id: 'completion', labelKey: 'steps.done' } ]; @@ -60,6 +64,8 @@ export function OnboardingWizard({ const { updateSettings } = useSettingsStore(); const [currentStepIndex, setCurrentStepIndex] = useState(0); const [completedSteps, setCompletedSteps] = useState>(new Set()); + // Track if oauth step was bypassed (API key path chosen) + const [oauthBypassed, setOauthBypassed] = useState(false); // Get current step ID const currentStepId = WIZARD_STEPS[currentStepIndex].id; @@ -76,21 +82,46 @@ export function OnboardingWizard({ // Mark current step as completed setCompletedSteps(prev => new Set(prev).add(currentStepId)); + // If leaving auth-choice, reset oauth bypassed flag + if (currentStepId === 'auth-choice') { + setOauthBypassed(false); + } + if (currentStepIndex < WIZARD_STEPS.length - 1) { setCurrentStepIndex(prev => prev + 1); } }, [currentStepIndex, currentStepId]); const goToPreviousStep = useCallback(() => { + // If going back from graphiti and oauth was bypassed, go back to auth-choice (skip oauth) + if (currentStepId === 'graphiti' && oauthBypassed) { + // Find index of auth-choice step + const authChoiceIndex = WIZARD_STEPS.findIndex(step => step.id === 'auth-choice'); + setCurrentStepIndex(authChoiceIndex); + setOauthBypassed(false); + return; + } + if (currentStepIndex > 0) { setCurrentStepIndex(prev => prev - 1); } - }, [currentStepIndex]); + }, [currentStepIndex, currentStepId, oauthBypassed]); + + // Handler for when API key path is chosen - skips oauth step + const handleSkipToGraphiti = useCallback(() => { + setOauthBypassed(true); + setCompletedSteps(prev => new Set(prev).add('auth-choice')); + + // Find index of graphiti step + const graphitiIndex = WIZARD_STEPS.findIndex(step => step.id === 'graphiti'); + setCurrentStepIndex(graphitiIndex); + }, []); // Reset wizard state (for re-running) - defined before skipWizard/finishWizard that use it const resetWizard = useCallback(() => { setCurrentStepIndex(0); setCompletedSteps(new Set()); + setOauthBypassed(false); }, []); const skipWizard = useCallback(async () => { @@ -151,6 +182,15 @@ export function OnboardingWizard({ onSkip={skipWizard} /> ); + case 'auth-choice': + return ( + + ); case 'oauth': return ( ); - case 'memory': + case 'privacy': return ( - ); + case 'graphiti': + return ( + + ); case 'completion': return ( void; + onBack: () => void; +} + +/** + * Onboarding step for anonymous error reporting opt-in. + * Explains what data is collected and what is never collected. + * Enabled by default to help improve the app. 
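The OnboardingWizard changes above introduce two paths through the same step list: the OAuth path walks every step, while the API key path jumps from `auth-choice` straight to `graphiti` and records that jump in `oauthBypassed` so the back button returns to `auth-choice` instead of the skipped OAuth step. The transition sketch below mirrors the step ids from the hunk; the helper functions themselves are illustrative, not the component's code.

```ts
// Sketch of the forward/back transitions described above.
type StepId = 'welcome' | 'auth-choice' | 'oauth' | 'claude-code' | 'devtools' | 'privacy' | 'graphiti' | 'completion';

const STEPS: StepId[] = ['welcome', 'auth-choice', 'oauth', 'claude-code', 'devtools', 'privacy', 'graphiti', 'completion'];

interface WizardState {
  index: number;
  oauthBypassed: boolean;
}

// API key path: jump from auth-choice directly to graphiti and record the bypass.
export function skipToGraphiti(): WizardState {
  return { index: STEPS.indexOf('graphiti'), oauthBypassed: true };
}

// Back button: when OAuth was bypassed and we are on graphiti, return to auth-choice.
export function goBack(state: WizardState): WizardState {
  if (STEPS[state.index] === 'graphiti' && state.oauthBypassed) {
    return { index: STEPS.indexOf('auth-choice'), oauthBypassed: false };
  }
  return { ...state, index: Math.max(0, state.index - 1) };
}
```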
+ */ +export function PrivacyStep({ onNext, onBack }: PrivacyStepProps) { + const { t } = useTranslation(['onboarding', 'common']); + const { settings, updateSettings } = useSettingsStore(); + const [sentryEnabled, setSentryEnabled] = useState(settings.sentryEnabled ?? true); + const [isSaving, setIsSaving] = useState(false); + const [error, setError] = useState(null); + + const handleToggle = (checked: boolean) => { + setSentryEnabled(checked); + setError(null); // Clear error when user interacts + }; + + const handleSave = async () => { + setIsSaving(true); + setError(null); + try { + const result = await window.electronAPI.saveSettings({ sentryEnabled }); + if (result?.success) { + updateSettings({ sentryEnabled }); + notifySentryStateChanged(sentryEnabled); + onNext(); + } else { + setError(t('onboarding:privacy.saveFailed', 'Failed to save privacy settings. Please try again.')); + } + } catch (err) { + setError(t('onboarding:privacy.saveFailed', 'Failed to save privacy settings. Please try again.')); + } finally { + setIsSaving(false); + } + }; + + return ( +
+
+ {/* Header */} +
+
+
+ +
+
+

+ {t('onboarding:privacy.title')} +

+

+ {t('onboarding:privacy.subtitle')} +

+
+ +
+ {/* What we collect */} + + +
+ +
+

+ {t('onboarding:privacy.whatWeCollect.title')} +

+
+  • {t('onboarding:privacy.whatWeCollect.crashReports')}
+  • {t('onboarding:privacy.whatWeCollect.errorMessages')}
+  • {t('onboarding:privacy.whatWeCollect.appVersion')}
+
+
+
+
+ + {/* What we never collect */} + + +
+ +
+

+ {t('onboarding:privacy.whatWeNeverCollect.title')} +

+
+  • {t('onboarding:privacy.whatWeNeverCollect.code')}
+  • {t('onboarding:privacy.whatWeNeverCollect.filenames')}
+  • {t('onboarding:privacy.whatWeNeverCollect.apiKeys')}
+  • {t('onboarding:privacy.whatWeNeverCollect.personalData')}
+
+
+
+
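// Minimal sketch of the translation resource shape the t() calls above rely on
// (onboarding namespace, privacy.* keys). Key names are taken from the calls in this
// component; the string values here are placeholders, not the project's actual copy.
const onboardingPrivacyResourceSketch = {
  privacy: {
    title: 'Anonymous error reporting',
    subtitle: 'Help improve the app by sharing crash data',
    whatWeCollect: {
      title: 'What we collect',
      crashReports: 'Crash reports',
      errorMessages: 'Error messages',
      appVersion: 'App version'
    },
    whatWeNeverCollect: {
      title: 'What we never collect',
      code: 'Your code',
      filenames: 'File names or paths',
      apiKeys: 'API keys or credentials',
      personalData: 'Personal data'
    }
  }
};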
+ + {/* Toggle */} + + +
+
+ +
+ +

+ {t('onboarding:privacy.toggle.description')} +

+
+
+ +
+
+
+
+ + {/* Error Display */} + {error && ( +
+ + {error} +
+ )} + + {/* Action Buttons */} +
+ + +
+
+
+ ); +} diff --git a/apps/frontend/src/renderer/components/onboarding/index.ts b/apps/frontend/src/renderer/components/onboarding/index.ts index 5bb106689e..3044c1b7d8 100644 --- a/apps/frontend/src/renderer/components/onboarding/index.ts +++ b/apps/frontend/src/renderer/components/onboarding/index.ts @@ -5,7 +5,9 @@ export { OnboardingWizard } from './OnboardingWizard'; export { WelcomeStep } from './WelcomeStep'; +export { AuthChoiceStep } from './AuthChoiceStep'; export { OAuthStep } from './OAuthStep'; +export { PrivacyStep } from './PrivacyStep'; export { MemoryStep } from './MemoryStep'; export { OllamaModelSelector } from './OllamaModelSelector'; export { FirstSpecStep } from './FirstSpecStep'; diff --git a/apps/frontend/src/renderer/components/project-settings/CollapsibleSection.tsx b/apps/frontend/src/renderer/components/project-settings/CollapsibleSection.tsx index 9ded269ce5..e160f586a7 100644 --- a/apps/frontend/src/renderer/components/project-settings/CollapsibleSection.tsx +++ b/apps/frontend/src/renderer/components/project-settings/CollapsibleSection.tsx @@ -1,4 +1,4 @@ -import { ReactNode } from 'react'; +import { ReactNode, useId } from 'react'; import { ChevronDown, ChevronUp } from 'lucide-react'; interface CollapsibleSectionProps { @@ -18,11 +18,16 @@ export function CollapsibleSection({ badge, children, }: CollapsibleSectionProps) { + const contentId = useId(); + return (
{isExpanded && ( -
+
{children}
)} diff --git a/apps/frontend/src/renderer/components/roadmap/FeatureDetailPanel.tsx b/apps/frontend/src/renderer/components/roadmap/FeatureDetailPanel.tsx index 424c967bbf..b842236241 100644 --- a/apps/frontend/src/renderer/components/roadmap/FeatureDetailPanel.tsx +++ b/apps/frontend/src/renderer/components/roadmap/FeatureDetailPanel.tsx @@ -1,4 +1,5 @@ import { useState } from 'react'; +import { useTranslation } from 'react-i18next'; import { ChevronRight, Lightbulb, @@ -31,6 +32,7 @@ export function FeatureDetailPanel({ onDelete, competitorInsights = [], }: FeatureDetailPanelProps) { + const { t } = useTranslation('common'); const [showDeleteConfirm, setShowDeleteConfirm] = useState(false); const handleDelete = () => { @@ -69,10 +71,11 @@ export function FeatureDetailPanel({ e.stopPropagation(); setShowDeleteConfirm(true); }} + aria-label={t('accessibility.deleteFeatureAriaLabel')} > -
diff --git a/apps/frontend/src/renderer/components/roadmap/RoadmapHeader.tsx b/apps/frontend/src/renderer/components/roadmap/RoadmapHeader.tsx index c95d9dc550..50657db073 100644 --- a/apps/frontend/src/renderer/components/roadmap/RoadmapHeader.tsx +++ b/apps/frontend/src/renderer/components/roadmap/RoadmapHeader.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { Target, Users, BarChart3, RefreshCw, Plus, TrendingUp } from 'lucide-react'; import { Badge } from '../ui/badge'; import { Button } from '../ui/button'; @@ -7,6 +8,7 @@ import { ROADMAP_PRIORITY_COLORS } from '../../../shared/constants'; import type { RoadmapHeaderProps } from './types'; export function RoadmapHeader({ roadmap, competitorAnalysis, onAddFeature, onRefresh, onViewCompetitorAnalysis }: RoadmapHeaderProps) { + const { t } = useTranslation('common'); const stats = getFeatureStats(roadmap); return ( @@ -55,7 +57,7 @@ export function RoadmapHeader({ roadmap, competitorAnalysis, onAddFeature, onRef - diff --git a/apps/frontend/src/renderer/components/roadmap/RoadmapTabs.tsx b/apps/frontend/src/renderer/components/roadmap/RoadmapTabs.tsx index 689fd28c80..ca5de7d96f 100644 --- a/apps/frontend/src/renderer/components/roadmap/RoadmapTabs.tsx +++ b/apps/frontend/src/renderer/components/roadmap/RoadmapTabs.tsx @@ -37,6 +37,7 @@ export function RoadmapTabs({ {/* Kanban View */} (null); - const [isCheckingSourceUpdate, setIsCheckingSourceUpdate] = useState(false); - const [isDownloadingUpdate, setIsDownloadingUpdate] = useState(false); - const [downloadProgress, setDownloadProgress] = useState(null); - // Local version state that can be updated after successful update - const [displayVersion, setDisplayVersion] = useState(version); - // Electron app update state const [appUpdateInfo, setAppUpdateInfo] = useState(null); - const [_isCheckingAppUpdate, setIsCheckingAppUpdate] = useState(false); + const [isCheckingAppUpdate, setIsCheckingAppUpdate] = useState(false); const [isDownloadingAppUpdate, setIsDownloadingAppUpdate] = useState(false); const [appDownloadProgress, setAppDownloadProgress] = useState(null); const [isAppUpdateDownloaded, setIsAppUpdateDownloaded] = useState(false); - - // Sync displayVersion with prop when it changes - useEffect(() => { - setDisplayVersion(version); - }, [version]); + // Stable downgrade state (shown when user turns off beta while on prerelease) + const [stableDowngradeInfo, setStableDowngradeInfo] = useState(null); // Check for updates on mount useEffect(() => { if (section === 'updates') { - checkForSourceUpdates(); checkForAppUpdates(); } }, [section]); - // Listen for source download progress - useEffect(() => { - const cleanup = window.electronAPI.onAutoBuildSourceUpdateProgress((progress) => { - setDownloadProgress(progress); - if (progress.stage === 'complete') { - setIsDownloadingUpdate(false); - // Update the displayed version if a new version was provided - if (progress.newVersion) { - setDisplayVersion(progress.newVersion); - } - checkForSourceUpdates(); - } else if (progress.stage === 'error') { - setIsDownloadingUpdate(false); - } - }); - - return cleanup; - }, []); - // Listen for app update events useEffect(() => { const cleanupAvailable = window.electronAPI.onAppUpdateAvailable((info) => { @@ -134,16 +99,24 @@ export function AdvancedSettings({ settings, onSettingsChange, section, version setIsDownloadingAppUpdate(false); setIsAppUpdateDownloaded(true); setAppDownloadProgress(null); + // Clear downgrade info if any update downloaded + 
setStableDowngradeInfo(null); }); const cleanupProgress = window.electronAPI.onAppUpdateProgress((progress) => { setAppDownloadProgress(progress); }); + // Listen for stable downgrade available (when user turns off beta while on prerelease) + const cleanupStableDowngrade = window.electronAPI.onAppUpdateStableDowngrade((info) => { + setStableDowngradeInfo(info); + }); + return () => { cleanupAvailable(); cleanupDownloaded(); cleanupProgress(); + cleanupStableDowngrade(); }; }, []); @@ -167,7 +140,12 @@ export function AdvancedSettings({ settings, onSettingsChange, section, version const handleDownloadAppUpdate = async () => { setIsDownloadingAppUpdate(true); try { - await window.electronAPI.downloadAppUpdate(); + const result = await window.electronAPI.downloadAppUpdate(); + if (!result.success) { + console.error('Failed to download app update:', result.error); + setIsDownloadingAppUpdate(false); + } + // Note: Success case is handled by the onAppUpdateDownloaded event listener } catch (err) { console.error('Failed to download app update:', err); setIsDownloadingAppUpdate(false); @@ -178,30 +156,24 @@ export function AdvancedSettings({ settings, onSettingsChange, section, version window.electronAPI.installAppUpdate(); }; - const checkForSourceUpdates = async () => { - console.log('[AdvancedSettings] Checking for source updates...'); - setIsCheckingSourceUpdate(true); + const handleDownloadStableVersion = async () => { + setIsDownloadingAppUpdate(true); try { - const result = await window.electronAPI.checkAutoBuildSourceUpdate(); - console.log('[AdvancedSettings] Check result:', result); - if (result.success && result.data) { - setSourceUpdateCheck(result.data); - // Update displayed version from the check result (most accurate) - if (result.data.currentVersion) { - setDisplayVersion(result.data.currentVersion); - } + // Use dedicated stable download API with allowDowngrade enabled + const result = await window.electronAPI.downloadStableUpdate(); + if (!result.success) { + console.error('Failed to download stable version:', result.error); + setIsDownloadingAppUpdate(false); } + // Note: Success case is handled by the onAppUpdateDownloaded event listener } catch (err) { - console.error('[AdvancedSettings] Check error:', err); - } finally { - setIsCheckingSourceUpdate(false); + console.error('Failed to download stable version:', err); + setIsDownloadingAppUpdate(false); } }; - const handleDownloadSourceUpdate = () => { - setIsDownloadingUpdate(true); - setDownloadProgress(null); - window.electronAPI.downloadAutoBuildSourceUpdate(); + const dismissStableDowngrade = () => { + setStableDowngradeInfo(null); }; if (section === 'updates') { @@ -211,7 +183,45 @@ export function AdvancedSettings({ settings, onSettingsChange, section, version description={t('updates.description')} >
- {/* Electron App Update Section */} + {/* Current Version Display */} +
+
+
+

{t('updates.version')}

+

+ {version || t('updates.loading')} +

+
+ {isCheckingAppUpdate ? ( + + ) : appUpdateInfo ? ( + + ) : ( + + )} +
+ + {/* Update status */} + {!appUpdateInfo && !isCheckingAppUpdate && ( +

+ {t('updates.latestVersion')} +

+ )} + +
+ +
+
+ + {/* Electron App Update Section - shows when update available */} {(appUpdateInfo || isAppUpdateDownloaded) && (
@@ -302,113 +312,6 @@ export function AdvancedSettings({ settings, onSettingsChange, section, version
)} - {/* Unified Version Display with Update Check */} -
-
-
-

{t('updates.version')}

-

- {displayVersion || t('updates.loading')} -

-
- {isCheckingSourceUpdate ? ( - - ) : sourceUpdateCheck?.updateAvailable ? ( - - ) : ( - - )} -
- - {/* Update status */} - {isCheckingSourceUpdate ? ( -

- {t('updates.checkingForUpdates')} -

- ) : sourceUpdateCheck ? ( - <> - {sourceUpdateCheck.latestVersion && sourceUpdateCheck.updateAvailable && ( -

- {t('updates.newVersionAvailable')} {sourceUpdateCheck.latestVersion} -

- )} - - {sourceUpdateCheck.error && ( -

{sourceUpdateCheck.error}

- )} - - {!sourceUpdateCheck.updateAvailable && !sourceUpdateCheck.error && ( -

- {t('updates.latestVersion')} -

- )} - - {sourceUpdateCheck.updateAvailable && ( -
- {sourceUpdateCheck.releaseNotes && ( -
- -
- )} - - {sourceUpdateCheck.releaseUrl && ( - - )} - - {isDownloadingUpdate ? ( -
-
- - {downloadProgress?.message || 'Downloading...'} -
- {downloadProgress?.percent !== undefined && ( - - )} -
- ) : downloadProgress?.stage === 'complete' ? ( -
- - {downloadProgress.message} -
- ) : downloadProgress?.stage === 'error' ? ( -
- - {downloadProgress.message} -
- ) : ( - - )} -
- )} - - ) : ( -

- {t('updates.unableToCheck')} -

- )} - -
- -
-
-
@@ -433,11 +336,113 @@ export function AdvancedSettings({ settings, onSettingsChange, section, version
- onSettingsChange({ ...settings, betaUpdates: checked }) - } + onCheckedChange={(checked) => { + onSettingsChange({ ...settings, betaUpdates: checked }); + if (checked) { + // Clear downgrade info when enabling beta again + setStableDowngradeInfo(null); + } else { + // Clear beta update info when disabling beta, so stable downgrade UI can show + setAppUpdateInfo(null); + } + }} />
+ + {/* Stable Downgrade Section - shown when user turns off beta while on prerelease */} + {stableDowngradeInfo && !appUpdateInfo && ( +
+
+
+ +

{t('updates.stableDowngradeAvailable')}

+
+ +
+ +

+ {t('updates.stableDowngradeDescription')} +

+ +
+
+

+ {t('updates.stableVersion')} +

+

+ {stableDowngradeInfo.version} +

+ {stableDowngradeInfo.releaseDate && ( +

+ {t('updates.released')} {new Date(stableDowngradeInfo.releaseDate).toLocaleDateString()} +

+ )} +
+ {isDownloadingAppUpdate ? ( + + ) : ( + + )} +
+ + {/* Release Notes */} + {stableDowngradeInfo.releaseNotes && ( +
+ +
+ )} + + {/* Download Progress */} + {isDownloadingAppUpdate && appDownloadProgress && ( +
+
+ {t('updates.downloading')} + + {Math.round(appDownloadProgress.percent)}% + +
+ +

+ {(appDownloadProgress.transferred / 1024 / 1024).toFixed(2)} MB / {(appDownloadProgress.total / 1024 / 1024).toFixed(2)} MB +

+
+ )} + + {/* Action Buttons */} +
+ + +
+
+ )}
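// Hypothetical main-process counterpart for the stable-downgrade flow used above
// (window.electronAPI.downloadStableUpdate / onAppUpdateStableDowngrade). The IPC
// channel name and handler shape are assumptions for illustration only; the
// allowPrerelease / allowDowngrade / checkForUpdates / downloadUpdate members are
// real electron-updater APIs.
import { ipcMain } from 'electron';
import { autoUpdater } from 'electron-updater';

ipcMain.handle('app-update:download-stable', async () => {
  try {
    // Leave the beta channel and allow moving back to an older stable build.
    autoUpdater.allowPrerelease = false;
    autoUpdater.allowDowngrade = true;
    const result = await autoUpdater.checkForUpdates();
    if (result?.updateInfo) {
      await autoUpdater.downloadUpdate();
    }
    return { success: true };
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) };
  }
});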
); diff --git a/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx b/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx index 8b95f84832..891c412e07 100644 --- a/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx +++ b/apps/frontend/src/renderer/components/settings/AgentProfileSettings.tsx @@ -1,6 +1,6 @@ -import { useState } from 'react'; +import { useState, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import { Brain, Scale, Zap, Check, Sparkles, ChevronDown, ChevronUp, RotateCcw } from 'lucide-react'; +import { Brain, Scale, Zap, Check, Sparkles, ChevronDown, ChevronUp, RotateCcw, Settings2 } from 'lucide-react'; import { cn } from '../../lib/utils'; import { DEFAULT_AGENT_PROFILES, @@ -29,7 +29,8 @@ const iconMap: Record = { Brain, Scale, Zap, - Sparkles + Sparkles, + Settings2 }; const PHASE_KEYS: Array = ['spec', 'planning', 'coding', 'qa']; @@ -37,45 +38,76 @@ const PHASE_KEYS: Array = ['spec', 'planning', 'coding', /** * Agent Profile Settings component * Displays preset agent profiles for quick model/thinking level configuration - * Used in the Settings page under Agent Settings + * All presets show phase configuration for full customization */ export function AgentProfileSettings() { const { t } = useTranslation('settings'); const settings = useSettingsStore((state) => state.settings); const selectedProfileId = settings.selectedAgentProfile || 'auto'; - const [showPhaseConfig, setShowPhaseConfig] = useState(selectedProfileId === 'auto'); + const [showPhaseConfig, setShowPhaseConfig] = useState(true); - // Get current phase config from settings or defaults - const currentPhaseModels: PhaseModelConfig = settings.customPhaseModels || DEFAULT_PHASE_MODELS; - const currentPhaseThinking: PhaseThinkingConfig = settings.customPhaseThinking || DEFAULT_PHASE_THINKING; + // Find the selected profile + const selectedProfile = useMemo(() => + DEFAULT_AGENT_PROFILES.find(p => p.id === selectedProfileId) || DEFAULT_AGENT_PROFILES[0], + [selectedProfileId] + ); + + // Get profile's default phase config + const profilePhaseModels = selectedProfile.phaseModels || DEFAULT_PHASE_MODELS; + const profilePhaseThinking = selectedProfile.phaseThinking || DEFAULT_PHASE_THINKING; + + // Get current phase config from settings (custom) or fall back to profile defaults + const currentPhaseModels: PhaseModelConfig = settings.customPhaseModels || profilePhaseModels; + const currentPhaseThinking: PhaseThinkingConfig = settings.customPhaseThinking || profilePhaseThinking; + + /** + * Check if current config differs from the selected profile's defaults + */ + const hasCustomConfig = useMemo((): boolean => { + if (!settings.customPhaseModels && !settings.customPhaseThinking) { + return false; // No custom settings, using profile defaults + } + return PHASE_KEYS.some( + phase => + currentPhaseModels[phase] !== profilePhaseModels[phase] || + currentPhaseThinking[phase] !== profilePhaseThinking[phase] + ); + }, [settings.customPhaseModels, settings.customPhaseThinking, currentPhaseModels, currentPhaseThinking, profilePhaseModels, profilePhaseThinking]); const handleSelectProfile = async (profileId: string) => { - const success = await saveSettings({ selectedAgentProfile: profileId }); + const profile = DEFAULT_AGENT_PROFILES.find(p => p.id === profileId); + if (!profile) return; + + // When selecting a preset, reset to that preset's defaults + const success = await saveSettings({ + selectedAgentProfile: profileId, + // Clear 
custom settings to use profile defaults + customPhaseModels: undefined, + customPhaseThinking: undefined + }); if (!success) { - // Log error for debugging - in future could show user toast notification console.error('Failed to save agent profile selection'); return; } - // Auto-expand phase config when Auto profile is selected - if (profileId === 'auto') { - setShowPhaseConfig(true); - } }; const handlePhaseModelChange = async (phase: keyof PhaseModelConfig, value: ModelTypeShort) => { + // Save as custom config (deviating from preset) const newPhaseModels = { ...currentPhaseModels, [phase]: value }; await saveSettings({ customPhaseModels: newPhaseModels }); }; const handlePhaseThinkingChange = async (phase: keyof PhaseThinkingConfig, value: ThinkingLevel) => { + // Save as custom config (deviating from preset) const newPhaseThinking = { ...currentPhaseThinking, [phase]: value }; await saveSettings({ customPhaseThinking: newPhaseThinking }); }; - const handleResetToDefaults = async () => { + const handleResetToProfileDefaults = async () => { + // Reset to the selected profile's defaults await saveSettings({ - customPhaseModels: DEFAULT_PHASE_MODELS, - customPhaseThinking: DEFAULT_PHASE_THINKING + customPhaseModels: undefined, + customPhaseThinking: undefined }); }; @@ -95,22 +127,12 @@ export function AgentProfileSettings() { return level?.label || thinkingValue; }; - /** - * Check if current phase config differs from defaults - */ - const hasCustomConfig = (): boolean => { - return PHASE_KEYS.some( - phase => - currentPhaseModels[phase] !== DEFAULT_PHASE_MODELS[phase] || - currentPhaseThinking[phase] !== DEFAULT_PHASE_THINKING[phase] - ); - }; - /** * Render a single profile card */ const renderProfileCard = (profile: AgentProfile) => { const isSelected = selectedProfileId === profile.id; + const isCustomized = isSelected && hasCustomConfig; const Icon = iconMap[profile.icon || 'Brain'] || Brain; return ( @@ -149,7 +171,14 @@ export function AgentProfileSettings() {
-

{profile.name}

+
+

{profile.name}

+ {isCustomized && ( + + {t('agentProfile.customized')} + + )} +

{profile.description}

@@ -187,110 +216,108 @@ export function AgentProfileSettings() { {DEFAULT_AGENT_PROFILES.map(renderProfileCard)}
- {/* Phase Configuration (only for Auto profile) */} - {selectedProfileId === 'auto' && ( -
- {/* Header - Collapsible */} - + {/* Phase Configuration - shown for all profiles */} +
+ {/* Header - Collapsible */} + - {/* Phase Configuration Content */} - {showPhaseConfig && ( -
- {/* Reset button */} - {hasCustomConfig() && ( -
- -
- )} + {/* Phase Configuration Content */} + {showPhaseConfig && ( +
+ {/* Reset button - shown when customized */} + {hasCustomConfig && ( +
+ +
+ )} - {/* Phase Configuration Grid */} -
- {PHASE_KEYS.map((phase) => ( -
-
- - - {t(`agentProfile.phases.${phase}.description`)} - + {/* Phase Configuration Grid */} +
+ {PHASE_KEYS.map((phase) => ( +
+
+ + + {t(`agentProfile.phases.${phase}.description`)} + +
+
+ {/* Model Select */} +
+ +
-
- {/* Model Select */} -
- - -
- {/* Thinking Level Select */} -
- - -
+ {/* Thinking Level Select */} +
+ +
- ))} -
- - {/* Info note */} -

- {t('agentProfile.phaseConfigNote')} -

+
+ ))}
- )} -
- )} + + {/* Info note */} +

+ {t('agentProfile.phaseConfigNote')} +

+
+ )} +
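// Sketch of the two saveSettings payload shapes this component writes, based on the
// handlers above (handleSelectProfile, handlePhaseModelChange, handleResetToProfileDefaults).
// The model short names below are hypothetical examples.
// 1) Selecting a preset adopts its defaults and clears any per-phase overrides:
const presetSelectionPayload = {
  selectedAgentProfile: 'auto',
  customPhaseModels: undefined,
  customPhaseThinking: undefined
};
// 2) Editing a single phase afterwards persists a full override map, which is what
//    makes hasCustomConfig report true and show the "Customized" badge on the card:
const phaseOverridePayload = {
  customPhaseModels: { spec: 'sonnet', planning: 'sonnet', coding: 'opus', qa: 'haiku' }
};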
diff --git a/apps/frontend/src/renderer/components/settings/AppSettings.tsx b/apps/frontend/src/renderer/components/settings/AppSettings.tsx index ba2d2eb450..a68f33eba1 100644 --- a/apps/frontend/src/renderer/components/settings/AppSettings.tsx +++ b/apps/frontend/src/renderer/components/settings/AppSettings.tsx @@ -18,7 +18,8 @@ import { Monitor, Globe, Code, - Bug + Bug, + Server } from 'lucide-react'; // GitLab icon component (lucide-react doesn't have one) @@ -51,6 +52,7 @@ import { IntegrationSettings } from './IntegrationSettings'; import { AdvancedSettings } from './AdvancedSettings'; import { DevToolsSettings } from './DevToolsSettings'; import { DebugSettings } from './DebugSettings'; +import { ProfileList } from './ProfileList'; import { ProjectSelector } from './ProjectSelector'; import { ProjectSettingsContent, ProjectSettingsSection } from './ProjectSettingsContent'; import { useProjectStore } from '../../stores/project-store'; @@ -65,7 +67,7 @@ interface AppSettingsDialogProps { } // App-level settings sections -export type AppSection = 'appearance' | 'display' | 'language' | 'devtools' | 'agent' | 'paths' | 'integrations' | 'updates' | 'notifications' | 'debug'; +export type AppSection = 'appearance' | 'display' | 'language' | 'devtools' | 'agent' | 'paths' | 'integrations' | 'api-profiles' | 'updates' | 'notifications' | 'debug'; interface NavItemConfig { id: T; @@ -80,6 +82,7 @@ const appNavItemsConfig: NavItemConfig[] = [ { id: 'agent', icon: Bot }, { id: 'paths', icon: FolderOpen }, { id: 'integrations', icon: Key }, + { id: 'api-profiles', icon: Server }, { id: 'updates', icon: Package }, { id: 'notifications', icon: Bell }, { id: 'debug', icon: Bug } @@ -191,6 +194,8 @@ export function AppSettingsDialog({ open, onOpenChange, initialSection, initialP return ; case 'integrations': return ; + case 'api-profiles': + return ; case 'updates': return ; case 'notifications': diff --git a/apps/frontend/src/renderer/components/settings/DebugSettings.tsx b/apps/frontend/src/renderer/components/settings/DebugSettings.tsx index f97fc0bb5c..e8aab443c5 100644 --- a/apps/frontend/src/renderer/components/settings/DebugSettings.tsx +++ b/apps/frontend/src/renderer/components/settings/DebugSettings.tsx @@ -1,8 +1,12 @@ import { useState } from 'react'; import { useTranslation } from 'react-i18next'; -import { Bug, FolderOpen, Copy, FileText, RefreshCw, Loader2, Check, AlertCircle } from 'lucide-react'; +import { Bug, FolderOpen, Copy, FileText, RefreshCw, Loader2, Check, AlertCircle, Shield } from 'lucide-react'; import { Button } from '../ui/button'; +import { Switch } from '../ui/switch'; +import { Label } from '../ui/label'; import { SettingsSection } from './SettingsSection'; +import { useSettingsStore } from '../../stores/settings-store'; +import { notifySentryStateChanged } from '../../lib/sentry'; interface DebugInfo { systemInfo: Record; @@ -16,11 +20,28 @@ interface DebugInfo { */ export function DebugSettings() { const { t } = useTranslation('settings'); + const { settings, updateSettings } = useSettingsStore(); const [debugInfo, setDebugInfo] = useState(null); const [isLoading, setIsLoading] = useState(false); const [copySuccess, setCopySuccess] = useState(false); const [error, setError] = useState(null); + // Handle Sentry toggle + const handleSentryToggle = async (checked: boolean) => { + setError(null); + try { + const result = await window.electronAPI.saveSettings({ sentryEnabled: checked }); + if (result.success) { + updateSettings({ sentryEnabled: checked }); + 
notifySentryStateChanged(checked); + } else { + setError(t('debug.errorReporting.saveFailed', 'Failed to save error reporting setting')); + } + } catch (err) { + setError(t('debug.errorReporting.saveFailed', 'Failed to save error reporting setting')); + } + }; + const loadDebugInfo = async () => { setIsLoading(true); setError(null); @@ -65,6 +86,28 @@ export function DebugSettings() { description={t('debug.description', 'Access logs and debug information for troubleshooting')} >
+ {/* Error Reporting Toggle */} +
+
+
+ +
+ +

+ {t('debug.errorReporting.description', 'Send crash reports to help improve Auto Claude. No personal data or code is collected.')} +

+
+
+ +
+
+ {/* Quick Actions */}
+ ) : null} +
+
+ + {/* Dropdown panel - only show when we have models to display */} + {isOpen && !isLoading && !modelDiscoveryNotSupported && models.length > 0 && ( +
+ {/* Search input */} +
+
+ + setSearchQuery(e.target.value)} + placeholder={t('settings:modelSelect.searchPlaceholder')} + className="pl-8" + autoFocus + /> +
+
+ + {/* Model list */} +
+ {filteredModels.length === 0 ? ( +
+ {t('settings:modelSelect.noResults')} +
+ ) : ( + filteredModels.map((model) => ( + + )) + )} +
+
+ )} + + {/* Info/error messages below input */} + {modelDiscoveryNotSupported && ( +

+ + {t('settings:modelSelect.discoveryNotAvailable')} +

+ )} + {error && !modelDiscoveryNotSupported && ( +

{error}

+ )} +
+ ); +} diff --git a/apps/frontend/src/renderer/components/settings/ProfileEditDialog.test.tsx b/apps/frontend/src/renderer/components/settings/ProfileEditDialog.test.tsx new file mode 100644 index 0000000000..044a248558 --- /dev/null +++ b/apps/frontend/src/renderer/components/settings/ProfileEditDialog.test.tsx @@ -0,0 +1,701 @@ +/** + * @vitest-environment jsdom + */ +/** + * ProfileEditDialog Tests + * + * Tests both create and edit modes for the API profile dialog. + * Following Story 1.3: Edit Existing Profile + */ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { render, screen, fireEvent, waitFor } from '@testing-library/react'; +import '@testing-library/jest-dom'; +import '../../../shared/i18n'; +import { ProfileEditDialog } from './ProfileEditDialog'; +import type { APIProfile } from '@shared/types/profile'; + +// Mock the settings store +vi.mock('../../stores/settings-store', () => ({ + useSettingsStore: vi.fn() +})); + +import { useSettingsStore } from '../../stores/settings-store'; + +describe('ProfileEditDialog - Edit Mode', () => { + const mockOnOpenChange = vi.fn(); + const mockOnSaved = vi.fn(); + + const mockProfile: APIProfile = { + id: '123e4567-e89b-12d3-a456-426614174000', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-ant-api123-test-key-abc123', + models: { + default: 'claude-3-5-sonnet-20241022', + haiku: 'claude-3-5-haiku-20241022' + }, + createdAt: 1700000000000, + updatedAt: 1700000000000 + }; + + beforeEach(() => { + vi.clearAllMocks(); + // Mock store to return updateProfile action + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + // Test 5 from story: Pre-populated form data + it('should pre-populate all fields with existing values when editing', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + // Verify all fields are pre-populated + await waitFor(() => { + expect(screen.getByLabelText(/name/i)).toHaveValue('Test Profile'); + expect(screen.getByLabelText(/base url/i)).toHaveValue('https://api.example.com'); + }); + + // Note: Model fields use ModelSearchableSelect component which doesn't use standard + // label/input associations. The model field functionality is tested via E2E tests. 
+ }); + + // Test 6 from story: API key displays masked + it('should display masked API key in edit mode', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + // API key field displays four mask characters (โ€ขโ€ขโ€ขโ€ข) plus only the last four characters of the full key + // Example: full key "sk-ant-api123-test-key-abc123" => masked display "โ€ขโ€ขโ€ขโ€ขc123" + await waitFor(() => { + const maskedInput = screen.getByDisplayValue(/โ€ขโ€ขโ€ขโ€ขc123/); + expect(maskedInput).toBeDisabled(); + }); + }); + + // Test 1 from story: Edit profile name + it('should update profile when form is modified and saved', async () => { + const mockUpdateFn = vi.fn().mockResolvedValue(true); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: mockUpdateFn, + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + // Wait for form to populate + await waitFor(() => { + expect(screen.getByLabelText(/name/i)).toHaveValue('Test Profile'); + }); + + // Change the name + const nameInput = screen.getByLabelText(/name/i); + fireEvent.change(nameInput, { target: { value: 'Updated Profile Name' } }); + + // Click save + const saveButton = screen.getByText(/save profile/i); + fireEvent.click(saveButton); + + // Verify updateProfile was called (not saveProfile) + await waitFor(() => { + expect(mockUpdateFn).toHaveBeenCalled(); + }); + }); + + // Dialog title should say "Edit Profile" in edit mode + it('should show "Edit Profile" title in edit mode', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByText('Edit Profile')).toBeInTheDocument(); + }); + }); + + // Test 7 from story: Cancel button + it('should close dialog without saving when Cancel is clicked', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + const cancelButton = screen.getByText('Cancel'); + fireEvent.click(cancelButton); + + await waitFor(() => { + expect(mockOnOpenChange).toHaveBeenCalledWith(false); + }); + }); + + // Test 8 from story: Models fields pre-populate + it('should pre-populate optional model fields with existing values', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByLabelText(/name/i)).toHaveValue('Test Profile'); + }); + + // Find model inputs by their labels + const modelLabels = screen.getAllByText(/model/i); + expect(modelLabels.length).toBeGreaterThan(0); + }); +}); + +describe('ProfileEditDialog - Create Mode', () => { + const mockOnOpenChange = vi.fn(); + const mockOnSaved = vi.fn(); + + beforeEach(() => { + vi.clearAllMocks(); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + saveProfile: vi.fn().mockResolvedValue(true), + profilesLoading: 
false, + profilesError: null + }); + }); + + // Dialog title should say "Add API Profile" in create mode + it('should show "Add API Profile" title in create mode', () => { + render( + + ); + + expect(screen.getByText('Add API Profile')).toBeInTheDocument(); + }); + + // Fields should be empty in create mode + it('should have empty fields in create mode', () => { + render( + + ); + + expect(screen.getByLabelText(/name/i)).toHaveValue(''); + expect(screen.getByLabelText(/base url/i)).toHaveValue(''); + }); + + // API key input should be normal (not masked) in create mode + it('should show normal API key input in create mode', () => { + render( + + ); + + const apiKeyInput = screen.getByLabelText(/api key/i); + expect(apiKeyInput).toHaveAttribute('type', 'password'); + expect(apiKeyInput).not.toBeDisabled(); + }); + + it('should apply preset values in create mode', async () => { + render( + + ); + + const presetTrigger = screen.getByLabelText(/preset/i); + fireEvent.keyDown(presetTrigger, { key: 'ArrowDown', code: 'ArrowDown' }); + + const glmGlobalOption = await screen.findByRole('option', { name: 'GLM (Global)' }); + fireEvent.click(glmGlobalOption); + + expect(screen.getByLabelText(/base url/i)).toHaveValue('https://api.z.ai/api/anthropic'); + expect(screen.getByLabelText(/name/i)).toHaveValue('GLM (Global)'); + }); + + it('should not overwrite name when applying a preset', async () => { + render( + + ); + + const nameInput = screen.getByLabelText(/name/i); + fireEvent.change(nameInput, { target: { value: 'My Custom Name' } }); + + const presetTrigger = screen.getByLabelText(/preset/i); + fireEvent.keyDown(presetTrigger, { key: 'ArrowDown', code: 'ArrowDown' }); + + const groqOption = await screen.findByRole('option', { name: 'Groq' }); + fireEvent.click(groqOption); + + expect(screen.getByLabelText(/name/i)).toHaveValue('My Custom Name'); + expect(screen.getByLabelText(/base url/i)).toHaveValue('https://api.groq.com/openai/v1'); + }); + + it('should move focus to Base URL after selecting a preset', async () => { + render( + + ); + + const presetTrigger = screen.getByLabelText(/preset/i); + fireEvent.keyDown(presetTrigger, { key: 'ArrowDown', code: 'ArrowDown' }); + + const anthropicOption = await screen.findByRole('option', { name: 'Anthropic' }); + fireEvent.click(anthropicOption); + + await waitFor(() => { + expect(screen.getByLabelText(/base url/i)).toHaveFocus(); + }); + }); +}); + +describe('ProfileEditDialog - Validation', () => { + const mockOnOpenChange = vi.fn(); + const mockProfile: APIProfile = { + id: 'test-id', + name: 'Test', + baseUrl: 'https://api.example.com', + apiKey: 'sk-ant-test123', + createdAt: Date.now(), + updatedAt: Date.now() + }; + + // Test 4 from story: Invalid Base URL validation + it('should show inline error for invalid Base URL', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + profilesLoading: false, + profilesError: null + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByLabelText(/base url/i)).toHaveValue('https://api.example.com'); + }); + + // Enter invalid URL + const urlInput = screen.getByLabelText(/base url/i); + fireEvent.change(urlInput, { target: { value: 'not-a-valid-url' } }); + + // Click save to trigger validation + const saveButton = screen.getByText(/save profile/i); + fireEvent.click(saveButton); + + // Should show error + await waitFor(() => { + expect(screen.getByText(/invalid url/i)).toBeInTheDocument(); + }); + }); + + // Test 2 
from story: Edit profile name to duplicate existing name + it('should show error when editing to duplicate name', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(false), // Simulating duplicate name error + profilesLoading: false, + profilesError: 'A profile with this name already exists' + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByLabelText(/name/i)).toHaveValue('Test'); + }); + + // Change name to a duplicate + const nameInput = screen.getByLabelText(/name/i); + fireEvent.change(nameInput, { target: { value: 'Duplicate Name' } }); + + // Click save + const saveButton = screen.getByText(/save profile/i); + fireEvent.click(saveButton); + + // Should show error from store + await waitFor(() => { + expect(screen.getByText(/A profile with this name already exists/i)).toBeInTheDocument(); + }); + }); + + // Test 3 from story: Edit active profile + it('should keep profile active after editing', async () => { + const mockUpdateFn = vi.fn().mockResolvedValue(true); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: mockUpdateFn, + profilesLoading: false, + profilesError: null, + profiles: [{ ...mockProfile, id: 'active-id' }], + activeProfileId: 'active-id' + }); + + const activeProfile: APIProfile = { + ...mockProfile, + id: 'active-id', + name: 'Active Profile' + }; + + render( + + ); + + await waitFor(() => { + expect(screen.getByLabelText(/name/i)).toHaveValue('Active Profile'); + }); + + // Change the name + const nameInput = screen.getByLabelText(/name/i); + fireEvent.change(nameInput, { target: { value: 'Updated Active Profile' } }); + + // Click save + const saveButton = screen.getByText(/save profile/i); + fireEvent.click(saveButton); + + // Verify updateProfile was called + await waitFor(() => { + expect(mockUpdateFn).toHaveBeenCalled(); + }); + }); +}); + +describe('ProfileEditDialog - Test Connection Feature', () => { + const mockOnOpenChange = vi.fn(); + const mockOnSaved = vi.fn(); + const mockTestConnection = vi.fn(); + + const mockProfile: APIProfile = { + id: 'test-id', + name: 'Test Profile', + baseUrl: 'https://api.example.com', + apiKey: 'sk-ant-test12345678', + createdAt: Date.now(), + updatedAt: Date.now() + }; + + beforeEach(() => { + vi.clearAllMocks(); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + saveProfile: vi.fn().mockResolvedValue(true), + testConnection: mockTestConnection, + profilesLoading: false, + profilesError: null, + isTestingConnection: false, + testConnectionResult: null + }); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should show Test Connection button', async () => { + render( + + ); + + await waitFor(() => { + expect(screen.getByText('Test Connection')).toBeInTheDocument(); + }); + }); + + it('should call testConnection when button is clicked', async () => { + render( + + ); + + const testButton = await screen.findByText('Test Connection'); + fireEvent.click(testButton); + + await waitFor(() => { + expect(mockTestConnection).toHaveBeenCalledWith( + 'https://api.example.com', + 'sk-ant-test12345678', + expect.any(AbortSignal) + ); + }); + }); + + it('should show loading state while testing connection', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + testConnection: mockTestConnection, + profilesLoading: false, + profilesError: null, + isTestingConnection: 
true, + testConnectionResult: null + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByText('Testing...')).toBeInTheDocument(); + }); + + const testButton = screen.getByText('Testing...'); + expect(testButton).toBeDisabled(); + }); + + it('should show success message when connection succeeds', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + testConnection: mockTestConnection, + profilesLoading: false, + profilesError: null, + isTestingConnection: false, + testConnectionResult: { + success: true, + message: 'Connection successful' + } + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByText('Connection Successful')).toBeInTheDocument(); + expect(screen.getByText('Connection successful')).toBeInTheDocument(); + }); + }); + + it('should show error message when connection fails', async () => { + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + testConnection: mockTestConnection, + profilesLoading: false, + profilesError: null, + isTestingConnection: false, + testConnectionResult: { + success: false, + errorType: 'auth', + message: 'Authentication failed. Please check your API key.' + } + }); + + render( + + ); + + await waitFor(() => { + expect(screen.getByText('Connection Failed')).toBeInTheDocument(); + expect(screen.getByText('Authentication failed. Please check your API key.')).toBeInTheDocument(); + }); + }); + + it('should validate baseUrl before testing connection', async () => { + const testConnectionFn = vi.fn(); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + testConnection: testConnectionFn, + profilesLoading: false, + profilesError: null, + isTestingConnection: false, + testConnectionResult: null + }); + + render( + + ); + + // Fill name (required to enable Test Connection button) + const nameInput = screen.getByLabelText(/name/i); + fireEvent.change(nameInput, { target: { value: 'Test Profile' } }); + + // Fill apiKey but leave baseUrl empty + const keyInput = screen.getByLabelText(/api key/i); + fireEvent.change(keyInput, { target: { value: 'sk-ant-test12345678' } }); + + // Test button should still be disabled since baseUrl is empty + const testButton = screen.getByText('Test Connection'); + expect(testButton).toBeDisabled(); + + // Should NOT call testConnection + expect(testConnectionFn).not.toHaveBeenCalled(); + }); + + it('should validate apiKey before testing connection', async () => { + const testConnectionFn = vi.fn(); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + testConnection: testConnectionFn, + profilesLoading: false, + profilesError: null, + isTestingConnection: false, + testConnectionResult: null + }); + + render( + + ); + + // Fill name (required to enable Test Connection button) + const nameInput = screen.getByLabelText(/name/i); + fireEvent.change(nameInput, { target: { value: 'Test Profile' } }); + + // Fill baseUrl but leave apiKey empty + const urlInput = screen.getByLabelText(/base url/i); + fireEvent.change(urlInput, { target: { value: 'https://api.example.com' } }); + + // Test button should still be disabled since apiKey is empty + const testButton = screen.getByText('Test Connection'); + expect(testButton).toBeDisabled(); + + // Should NOT call testConnection + expect(testConnectionFn).not.toHaveBeenCalled(); + }); + + it('should use 
profile.apiKey when testing in edit mode without changing key', async () => { + const testConnectionFn = vi.fn(); + (useSettingsStore as unknown as ReturnType).mockReturnValue({ + updateProfile: vi.fn().mockResolvedValue(true), + testConnection: testConnectionFn, + profilesLoading: false, + profilesError: null, + isTestingConnection: false, + testConnectionResult: null + }); + + render( + + ); + + const testButton = await screen.findByText('Test Connection'); + fireEvent.click(testButton); + + await waitFor(() => { + expect(testConnectionFn).toHaveBeenCalledWith( + 'https://api.example.com', + 'sk-ant-test12345678', + expect.any(AbortSignal) + ); + }); + }); +}); diff --git a/apps/frontend/src/renderer/components/settings/ProfileEditDialog.tsx b/apps/frontend/src/renderer/components/settings/ProfileEditDialog.tsx new file mode 100644 index 0000000000..ed7ce46507 --- /dev/null +++ b/apps/frontend/src/renderer/components/settings/ProfileEditDialog.tsx @@ -0,0 +1,584 @@ +/** + * ProfileEditDialog - Dialog for creating/editing API profiles + * + * Allows users to configure custom Anthropic-compatible API endpoints. + * Supports all profile fields including optional model name mappings. + * + * Features: + * - Required fields: Name, Base URL, API Key + * - Optional model fields: Default, Haiku, Sonnet, Opus + * - Form validation with error display + * - Save button triggers store action (create or update) + * - Close button cancels without saving + * - Edit mode: pre-populates form with existing profile data + * - Edit mode: API key masked with "Change" button + */ +import { useState, useEffect, useRef } from 'react'; +import { Loader2, AlertCircle, CheckCircle2 } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle +} from '../ui/dialog'; +import { Button } from '../ui/button'; +import { Input } from '../ui/input'; +import { Label } from '../ui/label'; +import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '../ui/select'; +import { useSettingsStore } from '../../stores/settings-store'; +import { ModelSearchableSelect } from './ModelSearchableSelect'; +import { useToast } from '../../hooks/use-toast'; +import { isValidUrl, isValidApiKey } from '../../lib/profile-utils'; +import type { APIProfile, ProfileFormData, TestConnectionResult } from '@shared/types/profile'; +import { maskApiKey } from '../../lib/profile-utils'; +import { API_PROVIDER_PRESETS } from '../../../shared/constants'; + +interface ProfileEditDialogProps { + /** Whether the dialog is open */ + open: boolean; + /** Callback when the dialog open state changes */ + onOpenChange: (open: boolean) => void; + /** Optional callback when profile is successfully saved */ + onSaved?: () => void; + /** Optional profile for edit mode (undefined = create mode) */ + profile?: APIProfile; +} + +export function ProfileEditDialog({ open, onOpenChange, onSaved, profile }: ProfileEditDialogProps) { + const { t } = useTranslation(); + const { + saveProfile, + updateProfile, + profilesLoading, + profilesError, + testConnection, + isTestingConnection, + testConnectionResult + } = useSettingsStore(); + const { toast } = useToast(); + + // Edit mode detection: profile prop determines mode + const isEditMode = !!profile; + + // Form state + const [name, setName] = useState(''); + const [baseUrl, setBaseUrl] = useState(''); + const [apiKey, setApiKey] = useState(''); + const [defaultModel, setDefaultModel] = 
useState(''); + const [haikuModel, setHaikuModel] = useState(''); + const [sonnetModel, setSonnetModel] = useState(''); + const [opusModel, setOpusModel] = useState(''); + const [presetId, setPresetId] = useState(''); + + // API key change state (for edit mode) + const [isChangingApiKey, setIsChangingApiKey] = useState(false); + + // Validation errors + const [nameError, setNameError] = useState(null); + const [urlError, setUrlError] = useState(null); + const [keyError, setKeyError] = useState(null); + + // AbortController ref for test connection cleanup + const abortControllerRef = useRef(null); + const baseUrlInputRef = useRef(null); + + // Local state for auto-hiding test result display + const [showTestResult, setShowTestResult] = useState(false); + + // Auto-hide test result after 5 seconds + useEffect(() => { + if (testConnectionResult) { + setShowTestResult(true); + const timeoutId = setTimeout(() => { + setShowTestResult(false); + }, 5000); + return () => clearTimeout(timeoutId); + } + }, [testConnectionResult]); + + // Cleanup AbortController when dialog closes or unmounts + useEffect(() => { + return () => { + abortControllerRef.current?.abort(); + abortControllerRef.current = null; + }; + }, []); + + // Reset form and pre-populate when dialog opens + // Note: Only reset when dialog opens/closes, not when profile prop changes + // This prevents race conditions if user rapidly clicks edit on different profiles + useEffect(() => { + if (open) { + if (isEditMode && profile) { + // Pre-populate form with existing profile data + setName(profile.name); + setBaseUrl(profile.baseUrl); + setApiKey(''); // Start empty - masked display shown instead + setDefaultModel(profile.models?.default || ''); + setHaikuModel(profile.models?.haiku || ''); + setSonnetModel(profile.models?.sonnet || ''); + setOpusModel(profile.models?.opus || ''); + setIsChangingApiKey(false); + setPresetId(''); + } else { + // Reset to empty form for create mode + setName(''); + setBaseUrl(''); + setApiKey(''); + setDefaultModel(''); + setHaikuModel(''); + setSonnetModel(''); + setOpusModel(''); + setIsChangingApiKey(false); + setPresetId(''); + } + // Clear validation errors + setNameError(null); + setUrlError(null); + setKeyError(null); + } else { + // Clear test result display when dialog closes + setShowTestResult(false); + } + }, [open]); + + const applyPreset = (id: string) => { + const preset = API_PROVIDER_PRESETS.find((item) => item.id === id); + if (!preset) return; + setPresetId(id); + setBaseUrl(preset.baseUrl); + if (!name.trim()) { + setName(t(preset.labelKey)); + } + }; + + // Validate form + const validateForm = (): boolean => { + let isValid = true; + + // Name validation + if (!name.trim()) { + setNameError(t('settings:apiProfiles.validation.nameRequired')); + isValid = false; + } else { + setNameError(null); + } + + // Base URL validation + if (!baseUrl.trim()) { + setUrlError(t('settings:apiProfiles.validation.baseUrlRequired')); + isValid = false; + } else if (!isValidUrl(baseUrl)) { + setUrlError(t('settings:apiProfiles.validation.baseUrlInvalid')); + isValid = false; + } else { + setUrlError(null); + } + + // API Key validation (only in create mode or when changing key in edit mode) + if (!isEditMode || isChangingApiKey) { + if (!apiKey.trim()) { + setKeyError(t('settings:apiProfiles.validation.apiKeyRequired')); + isValid = false; + } else if (!isValidApiKey(apiKey)) { + setKeyError(t('settings:apiProfiles.validation.apiKeyInvalid')); + isValid = false; + } else { + setKeyError(null); + } + } 
else { + setKeyError(null); + } + + return isValid; + }; + + // Handle test connection + const handleTestConnection = async () => { + // Determine API key to use for testing + const apiKeyForTest = isEditMode && !isChangingApiKey && profile + ? profile.apiKey + : apiKey; + + // Basic validation before testing + if (!baseUrl.trim()) { + setUrlError(t('settings:apiProfiles.validation.baseUrlRequired')); + return; + } + if (!apiKeyForTest.trim()) { + setKeyError(t('settings:apiProfiles.validation.apiKeyRequired')); + return; + } + + // Create AbortController for this test + abortControllerRef.current = new AbortController(); + + await testConnection(baseUrl.trim(), apiKeyForTest.trim(), abortControllerRef.current.signal); + }; + + // Check if form has minimum required fields for test connection + const isFormValidForTest = () => { + if (!name.trim() || !baseUrl.trim()) { + return false; + } + // In create mode or when changing key, need apiKey + if (!isEditMode || isChangingApiKey) { + return apiKey.trim().length > 0; + } + // In edit mode without changing key, existing profile has apiKey + return true; + }; + + // Handle save + const handleSave = async () => { + if (!validateForm()) { + return; + } + + if (isEditMode && profile) { + // Update existing profile + const updatedProfile: APIProfile = { + ...profile, + name: name.trim(), + baseUrl: baseUrl.trim(), + // Only update API key if user is changing it + ...(isChangingApiKey && { apiKey: apiKey.trim() }), + // Update models if provided + ...(defaultModel || haikuModel || sonnetModel || opusModel ? { + models: { + ...(defaultModel && { default: defaultModel.trim() }), + ...(haikuModel && { haiku: haikuModel.trim() }), + ...(sonnetModel && { sonnet: sonnetModel.trim() }), + ...(opusModel && { opus: opusModel.trim() }) + } + } : { models: undefined }) + }; + const success = await updateProfile(updatedProfile); + if (success) { + toast({ + title: t('settings:apiProfiles.toast.update.title'), + description: t('settings:apiProfiles.toast.update.description', { + name: name.trim() + }), + }); + onOpenChange(false); + onSaved?.(); + } + } else { + // Create new profile + const profileData: ProfileFormData = { + name: name.trim(), + baseUrl: baseUrl.trim(), + apiKey: apiKey.trim() + }; + + // Add optional models if provided + if (defaultModel || haikuModel || sonnetModel || opusModel) { + profileData.models = {}; + if (defaultModel) profileData.models.default = defaultModel.trim(); + if (haikuModel) profileData.models.haiku = haikuModel.trim(); + if (sonnetModel) profileData.models.sonnet = sonnetModel.trim(); + if (opusModel) profileData.models.opus = opusModel.trim(); + } + + const success = await saveProfile(profileData); + if (success) { + toast({ + title: t('settings:apiProfiles.toast.create.title'), + description: t('settings:apiProfiles.toast.create.description', { + name: name.trim() + }), + }); + onOpenChange(false); + onSaved?.(); + } + } + }; + + return ( + + + + + {isEditMode + ? t('settings:apiProfiles.dialog.editTitle') + : t('settings:apiProfiles.dialog.createTitle')} + + + {t('settings:apiProfiles.dialog.description')} + + + +
+
+ {/* Name field (required) */} +
+ + setName(e.target.value)} + className={nameError ? 'border-destructive' : ''} + /> + {nameError &&

{nameError}

} +
+ + {!isEditMode && ( +
+ + +

+ {t('settings:apiProfiles.hints.preset')} +

+
+ )} +
+ +
+ {/* Base URL field (required) */} +
+ + setBaseUrl(e.target.value)} + className={urlError ? 'border-destructive' : ''} + /> + {urlError &&

{urlError}

} +

+ {t('settings:apiProfiles.hints.baseUrl')} +

+
+ + {/* API Key field (required for create, masked in edit mode) */} +
+ + {isEditMode && !isChangingApiKey && profile ? ( + // Edit mode: show masked API key +
+ + +
+ ) : ( + // Create mode or changing key: show password input + <> + setApiKey(e.target.value)} + className={keyError ? 'border-destructive' : ''} + /> + {isEditMode && ( + + )} + + )} + {keyError &&

{keyError}

} +
+
+ + {/* Test Connection button */} + + + {/* Inline connection test result */} + {showTestResult && testConnectionResult && ( +
+ {testConnectionResult.success ? ( + + ) : ( + + )} +
+

+ {testConnectionResult.success + ? t('settings:apiProfiles.testConnection.success') + : t('settings:apiProfiles.testConnection.failure')} +

+

+ {testConnectionResult.message} +

+
+
+ )} + + {/* Optional model mappings */} +
+ +

+ {t('settings:apiProfiles.models.description')} +

+ +
+
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ + +
+
+
+ + {/* General error display */} + {profilesError && ( +
+

{profilesError}

+
+ )} +
+ + + + + +
+
+ ); +} diff --git a/apps/frontend/src/renderer/components/settings/ProfileList.test.tsx b/apps/frontend/src/renderer/components/settings/ProfileList.test.tsx new file mode 100644 index 0000000000..85725dc15a --- /dev/null +++ b/apps/frontend/src/renderer/components/settings/ProfileList.test.tsx @@ -0,0 +1,309 @@ +/** + * @vitest-environment jsdom + */ +/** + * Component and utility tests for ProfileList + * Tests utility functions and verifies component structure + */ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import '@testing-library/jest-dom/vitest'; +import { render, screen, fireEvent } from '@testing-library/react'; +import { ProfileList } from './ProfileList'; +import { maskApiKey } from '../../lib/profile-utils'; +import { useSettingsStore } from '../../stores/settings-store'; +import type { APIProfile } from '@shared/types/profile'; +import { TooltipProvider } from '../ui/tooltip'; +import i18n from '../../../shared/i18n'; + +// Wrapper for components that need TooltipProvider +function TestWrapper({ children }: { children: React.ReactNode }) { + return {children}; +} + +// Custom render with wrapper +function renderWithWrapper(ui: React.ReactElement) { + return render(ui, { wrapper: TestWrapper }); +} + +// Mock the settings store +vi.mock('../../stores/settings-store', () => ({ + useSettingsStore: vi.fn() +})); + +// Mock the toast hook +vi.mock('../../hooks/use-toast', () => ({ + useToast: () => ({ + toast: vi.fn() + }) +})); + +// Test profile data +const testProfiles: APIProfile[] = [ + { + id: 'profile-1', + name: 'Production API', + baseUrl: 'https://api.anthropic.com', + apiKey: 'sk-ant-prod-key-1234', + models: { default: 'claude-3-5-sonnet-20241022' }, + createdAt: Date.now(), + updatedAt: Date.now() + }, + { + id: 'profile-2', + name: 'Development API', + baseUrl: 'https://dev-api.example.com/v1', + apiKey: 'sk-ant-test-key-5678', + models: undefined, + createdAt: Date.now(), + updatedAt: Date.now() + } +]; + +/** + * Factory function to create a default settings store mock + * Override properties by spreading with custom values + */ +function createSettingsStoreMock(overrides: Partial> = {}) { + const mockDeleteProfile = vi.fn().mockResolvedValue(true); + const mockSetActiveProfile = vi.fn().mockResolvedValue(true); + + return { + profiles: testProfiles, + activeProfileId: 'profile-1' as string | null, + deleteProfile: mockDeleteProfile, + setActiveProfile: mockSetActiveProfile, + profilesLoading: false, + settings: {} as any, + isLoading: false, + error: null, + setSettings: vi.fn(), + updateSettings: vi.fn(), + setLoading: vi.fn(), + setError: vi.fn(), + setProfiles: vi.fn(), + setProfilesLoading: vi.fn(), + setProfilesError: vi.fn(), + saveProfile: vi.fn().mockResolvedValue(true), + updateProfile: vi.fn().mockResolvedValue(true), + profilesError: null, + ...overrides + }; +} + +describe('ProfileList - maskApiKey Utility', () => { + it('should mask API key showing only last 4 characters', () => { + const apiKey = 'sk-ant-prod-key-1234'; + const masked = maskApiKey(apiKey); + expect(masked).toBe('โ€ขโ€ขโ€ขโ€ข1234'); + }); + + it('should return dots for keys with 4 or fewer characters', () => { + expect(maskApiKey('key')).toBe('โ€ขโ€ขโ€ขโ€ข'); + expect(maskApiKey('1234')).toBe('โ€ขโ€ขโ€ขโ€ข'); + expect(maskApiKey('')).toBe('โ€ขโ€ขโ€ขโ€ข'); + }); + + it('should handle undefined or null keys', () => { + expect(maskApiKey(undefined as unknown as string)).toBe('โ€ขโ€ขโ€ขโ€ข'); + expect(maskApiKey(null as unknown as string)).toBe('โ€ขโ€ขโ€ขโ€ข'); + }); 
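// Illustrative sketch only (not part of this change): one way maskApiKey could be
// written so that it satisfies the assertions in this describe block — always four
// mask dots, plus the last four characters when the key is longer than four. The
// real helper in lib/profile-utils may be implemented differently.
function maskApiKeySketch(apiKey: string): string {
  const MASK = '••••';
  if (!apiKey || apiKey.length <= 4) {
    return MASK;
  }
  return MASK + apiKey.slice(-4);
}
// e.g. maskApiKeySketch('sk-ant-prod-key-1234') === '••••1234'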
+ + it('should mask long API keys correctly', () => { + const longKey = 'sk-ant-api03-very-long-key-abc123xyz789'; + const masked = maskApiKey(longKey); + expect(masked).toBe('โ€ขโ€ขโ€ขโ€ขz789'); // Last 4 chars + expect(masked.length).toBe(8); // 4 dots + 4 chars + }); + + it('should mask keys with exactly 5 characters', () => { + const key = 'abcde'; + const masked = maskApiKey(key); + expect(masked).toBe('โ€ขโ€ขโ€ขโ€ขbcde'); // Last 4 chars when length > 4 + }); +}); + +describe('ProfileList - Profile Data Structure', () => { + it('should have valid API profile structure', () => { + expect(testProfiles[0]).toMatchObject({ + id: expect.any(String), + name: expect.any(String), + baseUrl: expect.any(String), + apiKey: expect.any(String), + models: expect.any(Object) + }); + }); + + it('should support profiles without optional models field', () => { + expect(testProfiles[1].models).toBeUndefined(); + }); + + it('should have non-empty required fields', () => { + testProfiles.forEach(profile => { + expect(profile.id).toBeTruthy(); + expect(profile.name).toBeTruthy(); + expect(profile.baseUrl).toBeTruthy(); + expect(profile.apiKey).toBeTruthy(); + }); + }); +}); + +describe('ProfileList - Component Export', () => { + it('should be able to import ProfileList component', async () => { + const { ProfileList } = await import('./ProfileList'); + expect(ProfileList).toBeDefined(); + expect(typeof ProfileList).toBe('function'); + }); + + it('should be a named export', async () => { + const module = await import('./ProfileList'); + expect(Object.keys(module)).toContain('ProfileList'); + }); +}); + +describe('ProfileList - URL Extraction', () => { + it('should extract host from valid URLs', () => { + const url1 = new URL(testProfiles[0].baseUrl); + expect(url1.host).toBe('api.anthropic.com'); + + const url2 = new URL(testProfiles[1].baseUrl); + expect(url2.host).toBe('dev-api.example.com'); + }); + + it('should handle URLs with paths', () => { + const url = new URL('https://api.example.com/v1/messages'); + expect(url.host).toBe('api.example.com'); + expect(url.pathname).toBe('/v1/messages'); + }); + + it('should handle URLs with ports', () => { + const url = new URL('https://localhost:8080/api'); + expect(url.host).toBe('localhost:8080'); + }); +}); + +describe('ProfileList - Active Profile Logic', () => { + it('should identify active profile correctly', () => { + const activeProfileId = 'profile-1'; + const activeProfile = testProfiles.find(p => p.id === activeProfileId); + expect(activeProfile?.id).toBe('profile-1'); + expect(activeProfile?.name).toBe('Production API'); + }); + + it('should return undefined for non-matching profile', () => { + const activeProfileId = 'non-existent'; + const activeProfile = testProfiles.find(p => p.id === activeProfileId); + expect(activeProfile).toBeUndefined(); + }); + + it('should handle null active profile ID', () => { + const activeProfileId = null; + const activeProfile = testProfiles.find(p => p.id === activeProfileId); + expect(activeProfile).toBeUndefined(); + }); +}); + +// Test 1: Delete confirmation dialog shows profile name correctly +describe('ProfileList - Delete Confirmation Dialog', () => { + beforeEach(() => { + vi.mocked(useSettingsStore).mockReturnValue( + createSettingsStoreMock({ activeProfileId: 'profile-2' }) + ); + }); + + it('should show delete confirmation dialog with profile name', () => { + renderWithWrapper(); + + // Click delete button on first profile (find by test id) + const deleteButton = 
screen.getByTestId('profile-delete-button-profile-1'); + fireEvent.click(deleteButton); + + // Check dialog appears with profile name + expect(screen.getByText(i18n.t('settings:apiProfiles.dialog.deleteTitle'))).toBeInTheDocument(); + expect(screen.getByText( + i18n.t('settings:apiProfiles.dialog.deleteDescription', { name: 'Production API' }) + )).toBeInTheDocument(); + expect(screen.getByText(i18n.t('settings:apiProfiles.dialog.cancel'))).toBeInTheDocument(); + expect(screen.getByText(i18n.t('settings:apiProfiles.dialog.delete'))).toBeInTheDocument(); + }); + + // Test 5: Cancel delete โ†’ dialog closes, profile remains in list + it('should close dialog when cancel is clicked', async () => { + const mockStore = createSettingsStoreMock({ activeProfileId: 'profile-2' }); + vi.mocked(useSettingsStore).mockReturnValue(mockStore); + + renderWithWrapper(); + + // Click delete button (find by test id) + const deleteButton = screen.getByTestId('profile-delete-button-profile-1'); + fireEvent.click(deleteButton); + + // Click cancel + const cancelButton = await screen.findByText(i18n.t('settings:apiProfiles.dialog.cancel')); + fireEvent.click(cancelButton); + + // Dialog should be closed + expect(screen.queryByText( + i18n.t('settings:apiProfiles.dialog.deleteTitle') + )).not.toBeInTheDocument(); + // Profiles should still be visible + expect(screen.getByText('Production API')).toBeInTheDocument(); + expect(mockStore.deleteProfile).not.toHaveBeenCalled(); + }); + + // Test 6: Delete confirmation dialog has delete action button + it('should show delete action button in confirmation dialog', () => { + vi.mocked(useSettingsStore).mockReturnValue( + createSettingsStoreMock({ activeProfileId: 'profile-2' }) + ); + + renderWithWrapper(); + + // Click delete button on inactive profile (find by test id) + const deleteButton = screen.getByTestId('profile-delete-button-profile-1'); + fireEvent.click(deleteButton); + + // Dialog should have Delete elements (title "Delete Profile?" 
and "Delete" button) + expect(screen.getByText(i18n.t('settings:apiProfiles.dialog.deleteTitle'))).toBeInTheDocument(); + expect(screen.getByText(i18n.t('settings:apiProfiles.dialog.delete'))).toBeInTheDocument(); + }); +}); + +describe('ProfileList - Switch to OAuth Button', () => { + beforeEach(() => { + vi.mocked(useSettingsStore).mockReturnValue(createSettingsStoreMock()); + }); + + it('should show "Switch to OAuth" button when a profile is active', () => { + renderWithWrapper(); + + // Button should be visible when activeProfileId is set + expect(screen.getByText(i18n.t('settings:apiProfiles.switchToOauth.label'))).toBeInTheDocument(); + }); + + it('should NOT show "Switch to OAuth" button when no profile is active', () => { + vi.mocked(useSettingsStore).mockReturnValue( + createSettingsStoreMock({ activeProfileId: null }) + ); + + renderWithWrapper(); + + // Button should NOT be visible when activeProfileId is null + expect(screen.queryByText( + i18n.t('settings:apiProfiles.switchToOauth.label') + )).not.toBeInTheDocument(); + }); + + it('should call setActiveProfile with null when "Switch to OAuth" is clicked', () => { + const mockStore = createSettingsStoreMock(); + vi.mocked(useSettingsStore).mockReturnValue(mockStore); + + renderWithWrapper(); + + // Click the "Switch to OAuth" button + const switchButton = screen.getByText(i18n.t('settings:apiProfiles.switchToOauth.label')); + fireEvent.click(switchButton); + + // Should call setActiveProfile with null to switch to OAuth + expect(mockStore.setActiveProfile).toHaveBeenCalledWith(null); + }); +}); diff --git a/apps/frontend/src/renderer/components/settings/ProfileList.tsx b/apps/frontend/src/renderer/components/settings/ProfileList.tsx new file mode 100644 index 0000000000..e01e71efea --- /dev/null +++ b/apps/frontend/src/renderer/components/settings/ProfileList.tsx @@ -0,0 +1,336 @@ +/** + * ProfileList - Display and manage API profiles + * + * Shows all configured API profiles with an "Add Profile" button. + * Displays empty state when no profiles exist. + * Allows setting active profile, editing, and deleting profiles. 
+ */ +import { useState } from 'react'; +import { Plus, Trash2, Check, Server, Globe, Pencil } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; +import { Button } from '../ui/button'; +import { Tooltip, TooltipContent, TooltipTrigger } from '../ui/tooltip'; +import { useSettingsStore } from '../../stores/settings-store'; +import { ProfileEditDialog } from './ProfileEditDialog'; +import { maskApiKey } from '../../lib/profile-utils'; +import { cn } from '../../lib/utils'; +import { useToast } from '../../hooks/use-toast'; +import type { APIProfile } from '@shared/types/profile'; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle +} from '../ui/alert-dialog'; + +interface ProfileListProps { + /** Optional callback when a profile is saved */ + onProfileSaved?: () => void; +} + +export function ProfileList({ onProfileSaved }: ProfileListProps) { + const { t } = useTranslation(); + const { + profiles, + activeProfileId, + deleteProfile, + setActiveProfile, + profilesError + } = useSettingsStore(); + + const { toast } = useToast(); + + const [isAddDialogOpen, setIsAddDialogOpen] = useState(false); + const [editProfile, setEditProfile] = useState(null); + const [deleteConfirmProfile, setDeleteConfirmProfile] = useState(null); + const [isDeleting, setIsDeleting] = useState(false); + const [isSettingActive, setIsSettingActive] = useState(false); + + const handleDeleteProfile = async () => { + if (!deleteConfirmProfile) return; + + setIsDeleting(true); + const success = await deleteProfile(deleteConfirmProfile.id); + setIsDeleting(false); + + if (success) { + toast({ + title: t('settings:apiProfiles.toast.delete.title'), + description: t('settings:apiProfiles.toast.delete.description', { + name: deleteConfirmProfile.name + }), + }); + setDeleteConfirmProfile(null); + if (onProfileSaved) { + onProfileSaved(); + } + } else { + // Show error toast - handles both active profile error and other errors + toast({ + variant: 'destructive', + title: t('settings:apiProfiles.toast.delete.errorTitle'), + description: profilesError || t('settings:apiProfiles.toast.delete.errorFallback'), + }); + } + }; + + /** + * Handle setting a profile as active or switching to OAuth + * @param profileId - The profile ID to activate, or null to switch to OAuth + */ + const handleSetActiveProfile = async (profileId: string | null) => { + // Allow switching to OAuth (null) even when no profile is active + if (profileId !== null && profileId === activeProfileId) return; + + setIsSettingActive(true); + const success = await setActiveProfile(profileId); + setIsSettingActive(false); + + if (success) { + // Show success toast + if (profileId === null) { + // Switched to OAuth + toast({ + title: t('settings:apiProfiles.toast.switch.oauthTitle'), + description: t('settings:apiProfiles.toast.switch.oauthDescription'), + }); + } else { + // Switched to profile + const activeProfile = profiles.find(p => p.id === profileId); + if (activeProfile) { + toast({ + title: t('settings:apiProfiles.toast.switch.profileTitle'), + description: t('settings:apiProfiles.toast.switch.profileDescription', { + name: activeProfile.name + }), + }); + } + } + if (onProfileSaved) { + onProfileSaved(); + } + } else { + // Show error toast on failure + toast({ + variant: 'destructive', + title: t('settings:apiProfiles.toast.switch.errorTitle'), + description: profilesError || 
t('settings:apiProfiles.toast.switch.errorFallback'), + }); + } + }; + + const getHostFromUrl = (url: string): string => { + try { + const urlObj = new URL(url); + return urlObj.host; + } catch { + return url; + } + }; + + return ( +
+ {/* Header with Add button */} +
+
+

{t('settings:apiProfiles.title')}

+

+ {t('settings:apiProfiles.description')} +

+
+ +
+ + {/* Empty state */} + {profiles.length === 0 && ( +
+ +

{t('settings:apiProfiles.empty.title')}

+

+ {t('settings:apiProfiles.empty.description')} +

+ +
+ )} + + {/* Profile list */} + {profiles.length > 0 && ( +
+ {/* Switch to OAuth button (visible when a profile is active) */} + {activeProfileId && ( +
+ +
+ )} + {profiles.map((profile) => { + const isActive = activeProfileId === profile.id; + return ( +
+
+
+

{profile.name}

+ {activeProfileId === profile.id && ( + + + {t('settings:apiProfiles.activeBadge')} + + )} +
+
+ + +
+ + + {getHostFromUrl(profile.baseUrl)} + +
+
+ +

{profile.baseUrl}

+
+
+
+ {maskApiKey(profile.apiKey)} +
+
+ {profile.models && Object.keys(profile.models).length > 0 && ( +
+ {t('settings:apiProfiles.customModels', { + models: Object.keys(profile.models).join(', ') + })} +
+ )} +
+ +
+ {activeProfileId !== profile.id && ( + + )} + + + + + {t('settings:apiProfiles.tooltips.edit')} + + + + + + + {isActive + ? t('settings:apiProfiles.tooltips.deleteActive') + : t('settings:apiProfiles.tooltips.deleteInactive')} + + +
+
+ ); + })} +
+ )} + + {/* Add/Edit Dialog */} + { + if (!open) { + setIsAddDialogOpen(false); + setEditProfile(null); + } + }} + onSaved={() => { + setIsAddDialogOpen(false); + setEditProfile(null); + onProfileSaved?.(); + }} + profile={editProfile ?? undefined} + /> + + {/* Delete Confirmation Dialog */} + setDeleteConfirmProfile(null)} + > + + + {t('settings:apiProfiles.dialog.deleteTitle')} + + {t('settings:apiProfiles.dialog.deleteDescription', { + name: deleteConfirmProfile?.name ?? '' + })} + + + + + {t('settings:apiProfiles.dialog.cancel')} + + + {isDeleting + ? t('settings:apiProfiles.dialog.deleting') + : t('settings:apiProfiles.dialog.delete')} + + + + +
+ ); +} diff --git a/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx b/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx index 6fa3978117..9f4405fc53 100644 --- a/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx +++ b/apps/frontend/src/renderer/components/settings/integrations/GitHubIntegration.tsx @@ -7,7 +7,7 @@ import { Separator } from '../../ui/separator'; import { Button } from '../../ui/button'; import { GitHubOAuthFlow } from '../../project-settings/GitHubOAuthFlow'; import { PasswordInput } from '../../project-settings/PasswordInput'; -import type { ProjectEnvConfig, GitHubSyncStatus } from '../../../../shared/types'; +import type { ProjectEnvConfig, GitHubSyncStatus, ProjectSettings } from '../../../../shared/types'; // Debug logging const DEBUG = process.env.NODE_ENV === 'development' || process.env.DEBUG === 'true'; @@ -35,6 +35,9 @@ interface GitHubIntegrationProps { gitHubConnectionStatus: GitHubSyncStatus | null; isCheckingGitHub: boolean; projectPath?: string; // Project path for fetching git branches + // Project settings for mainBranch (used by kanban tasks and terminal worktrees) + settings?: ProjectSettings; + setSettings?: React.Dispatch>; } /** @@ -48,7 +51,9 @@ export function GitHubIntegration({ setShowGitHubToken: _setShowGitHubToken, gitHubConnectionStatus, isCheckingGitHub, - projectPath + projectPath, + settings, + setSettings }: GitHubIntegrationProps) { const [authMode, setAuthMode] = useState<'manual' | 'oauth' | 'oauth-success'>('manual'); const [oauthUsername, setOauthUsername] = useState(null); @@ -84,6 +89,24 @@ export function GitHubIntegration({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [envConfig?.githubEnabled, projectPath]); + /** + * Handler for branch selection changes. + * Updates BOTH project.settings.mainBranch (for Electron app) and envConfig.defaultBranch (for CLI backward compatibility). 
+ */ + const handleBranchChange = (branch: string) => { + debugLog('handleBranchChange: Updating branch to:', branch); + + // Update project settings (primary source for Electron app) + if (setSettings) { + setSettings(prev => ({ ...prev, mainBranch: branch })); + debugLog('handleBranchChange: Updated settings.mainBranch'); + } + + // Also update envConfig for CLI backward compatibility + updateEnvConfig({ defaultBranch: branch }); + debugLog('handleBranchChange: Updated envConfig.defaultBranch'); + }; + const fetchBranches = async () => { if (!projectPath) { debugLog('fetchBranches: No projectPath, skipping'); @@ -104,14 +127,15 @@ export function GitHubIntegration({ setBranches(result.data); debugLog('fetchBranches: Loaded branches:', result.data.length); - // Auto-detect default branch if not set - if (!envConfig?.defaultBranch) { - debugLog('fetchBranches: No defaultBranch set, auto-detecting...'); + // Auto-detect default branch if not set in project settings + // Priority: settings.mainBranch > envConfig.defaultBranch > auto-detect + if (!settings?.mainBranch && !envConfig?.defaultBranch) { + debugLog('fetchBranches: No branch set, auto-detecting...'); const detectResult = await window.electronAPI.detectMainBranch(projectPath); debugLog('fetchBranches: detectMainBranch result:', detectResult); if (detectResult.success && detectResult.data) { debugLog('fetchBranches: Auto-detected default branch:', detectResult.data); - updateEnvConfig({ defaultBranch: detectResult.data }); + handleBranchChange(detectResult.data); } } } else { @@ -314,10 +338,10 @@ export function GitHubIntegration({ {projectPath && ( updateEnvConfig({ defaultBranch: branch })} + onSelect={handleBranchChange} onRefresh={fetchBranches} /> )} diff --git a/apps/frontend/src/renderer/components/settings/integrations/GitLabIntegration.tsx b/apps/frontend/src/renderer/components/settings/integrations/GitLabIntegration.tsx index 292e0fb3dc..3d4618b0f9 100644 --- a/apps/frontend/src/renderer/components/settings/integrations/GitLabIntegration.tsx +++ b/apps/frontend/src/renderer/components/settings/integrations/GitLabIntegration.tsx @@ -7,7 +7,7 @@ import { Switch } from '../../ui/switch'; import { Separator } from '../../ui/separator'; import { Button } from '../../ui/button'; import { PasswordInput } from '../../project-settings/PasswordInput'; -import type { ProjectEnvConfig, GitLabSyncStatus } from '../../../../shared/types'; +import type { ProjectEnvConfig, GitLabSyncStatus, ProjectSettings } from '../../../../shared/types'; // Debug logging const DEBUG = process.env.NODE_ENV === 'development' || process.env.DEBUG === 'true'; @@ -35,6 +35,9 @@ interface GitLabIntegrationProps { gitLabConnectionStatus: GitLabSyncStatus | null; isCheckingGitLab: boolean; projectPath?: string; + // Project settings for mainBranch (used by kanban tasks and terminal worktrees) + settings?: ProjectSettings; + setSettings?: React.Dispatch>; } /** @@ -49,7 +52,9 @@ export function GitLabIntegration({ setShowGitLabToken: _setShowGitLabToken, gitLabConnectionStatus, isCheckingGitLab, - projectPath + projectPath, + settings, + setSettings }: GitLabIntegrationProps) { const { t } = useTranslation('gitlab'); const [authMode, setAuthMode] = useState<'manual' | 'oauth' | 'oauth-success'>('manual'); @@ -116,6 +121,24 @@ export function GitLabIntegration({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [envConfig?.gitlabEnabled, projectPath]); + /** + * Handler for branch selection changes. 
+ * Updates BOTH project.settings.mainBranch (for Electron app) and envConfig.defaultBranch (for CLI backward compatibility). + */ + const handleBranchChange = (branch: string) => { + debugLog('handleBranchChange: Updating branch to:', branch); + + // Update project settings (primary source for Electron app) + if (setSettings) { + setSettings(prev => ({ ...prev, mainBranch: branch })); + debugLog('handleBranchChange: Updated settings.mainBranch'); + } + + // Also update envConfig for CLI backward compatibility + updateEnvConfig({ defaultBranch: branch }); + debugLog('handleBranchChange: Updated envConfig.defaultBranch'); + }; + const fetchBranches = async () => { if (!projectPath) { debugLog('fetchBranches: No projectPath, skipping'); @@ -135,14 +158,15 @@ export function GitLabIntegration({ setBranches(result.data); debugLog('fetchBranches: Loaded branches:', result.data.length); - // Auto-detect default branch if not set - if (!envConfig?.defaultBranch) { - debugLog('fetchBranches: No defaultBranch set, auto-detecting...'); + // Auto-detect default branch if not set in project settings + // Priority: settings.mainBranch > envConfig.defaultBranch > auto-detect + if (!settings?.mainBranch && !envConfig?.defaultBranch) { + debugLog('fetchBranches: No branch set, auto-detecting...'); const detectResult = await window.electronAPI.detectMainBranch(projectPath); debugLog('fetchBranches: detectMainBranch result:', detectResult); if (detectResult.success && detectResult.data) { debugLog('fetchBranches: Auto-detected default branch:', detectResult.data); - updateEnvConfig({ defaultBranch: detectResult.data }); + handleBranchChange(detectResult.data); } } } else { @@ -515,10 +539,10 @@ export function GitLabIntegration({ {projectPath && ( updateEnvConfig({ defaultBranch: branch })} + onSelect={handleBranchChange} onRefresh={fetchBranches} /> )} diff --git a/apps/frontend/src/renderer/components/settings/sections/SectionRouter.tsx b/apps/frontend/src/renderer/components/settings/sections/SectionRouter.tsx index ec171deb5b..27dbdd8a0d 100644 --- a/apps/frontend/src/renderer/components/settings/sections/SectionRouter.tsx +++ b/apps/frontend/src/renderer/components/settings/sections/SectionRouter.tsx @@ -136,6 +136,8 @@ export function SectionRouter({ gitHubConnectionStatus={gitHubConnectionStatus} isCheckingGitHub={isCheckingGitHub} projectPath={project.path} + settings={settings} + setSettings={setSettings} /> @@ -160,6 +162,8 @@ export function SectionRouter({ gitLabConnectionStatus={gitLabConnectionStatus} isCheckingGitLab={isCheckingGitLab} projectPath={project.path} + settings={settings} + setSettings={setSettings} /> diff --git a/apps/frontend/src/renderer/components/task-detail/TaskDetailModal.tsx b/apps/frontend/src/renderer/components/task-detail/TaskDetailModal.tsx index 6b9d421a00..c9ffd4adf2 100644 --- a/apps/frontend/src/renderer/components/task-detail/TaskDetailModal.tsx +++ b/apps/frontend/src/renderer/components/task-detail/TaskDetailModal.tsx @@ -1,5 +1,6 @@ import { useTranslation } from 'react-i18next'; import * as DialogPrimitive from '@radix-ui/react-dialog'; +import { useToast } from '../../hooks/use-toast'; import { Separator } from '../ui/separator'; import { Tabs, TabsContent, TabsList, TabsTrigger } from '../ui/tabs'; import { ScrollArea } from '../ui/scroll-area'; @@ -76,6 +77,7 @@ const isFilesTabEnabled = () => { // Separate component to use hooks only when task exists function TaskDetailModalContent({ open, task, onOpenChange, onSwitchToTerminals, onOpenInbuiltTerminal }: 
{ open: boolean; task: Task; onOpenChange: (open: boolean) => void; onSwitchToTerminals?: () => void; onOpenInbuiltTerminal?: (id: string, cwd: string) => void }) { const { t } = useTranslation(['tasks']); + const { toast } = useToast(); const state = useTaskDetail({ task }); const showFilesTab = isFilesTabEnabled(); const progressPercent = calculateProgress(task.subtasks); @@ -162,6 +164,14 @@ function TaskDetailModalContent({ open, task, onOpenChange, onSwitchToTerminals, }; const handleClose = () => { + // Show toast notification if task is running + if (state.isRunning && !state.isStuck) { + toast({ + title: t('tasks:notifications.backgroundTaskTitle'), + description: t('tasks:notifications.backgroundTaskDescription'), + duration: 4000, + }); + } onOpenChange(false); }; @@ -289,7 +299,7 @@ function TaskDetailModalContent({ open, task, onOpenChange, onSwitchToTerminals, variant={task.status === 'done' ? 'success' : task.status === 'human_review' ? 'purple' : task.status === 'in_progress' ? 'info' : 'secondary'} className={cn('text-xs', (task.status === 'in_progress' && !state.isStuck) && 'status-running')} > - {TASK_STATUS_LABELS[task.status]} + {t(TASK_STATUS_LABELS[task.status])} {task.status === 'human_review' && task.reviewReason && ( -
+
{/* Metadata */} diff --git a/apps/frontend/src/renderer/components/task-detail/TaskHeader.tsx b/apps/frontend/src/renderer/components/task-detail/TaskHeader.tsx index c696882ce6..cde11069de 100644 --- a/apps/frontend/src/renderer/components/task-detail/TaskHeader.tsx +++ b/apps/frontend/src/renderer/components/task-detail/TaskHeader.tsx @@ -1,3 +1,4 @@ +import { useTranslation } from 'react-i18next'; import { X, Pencil, AlertTriangle } from 'lucide-react'; import { Button } from '../ui/button'; import { Badge } from '../ui/badge'; @@ -25,6 +26,8 @@ export function TaskHeader({ onClose, onEdit }: TaskHeaderProps) { + const { t } = useTranslation('tasks'); + return (
@@ -65,7 +68,7 @@ export function TaskHeader({ variant={task.status === 'done' ? 'success' : task.status === 'human_review' ? 'purple' : task.status === 'in_progress' ? 'info' : 'secondary'} className={cn('text-xs', (task.status === 'in_progress' && !isStuck) && 'status-running')} > - {TASK_STATUS_LABELS[task.status]} + {t(TASK_STATUS_LABELS[task.status])} {task.status === 'human_review' && task.reviewReason && ( - {isRunning && !isStuck ? 'Cannot edit while task is running' : 'Edit task'} + {isRunning && !isStuck ? t('kanban.cannotEditWhileRunning') : t('kanban.editTask')} -
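Note on the two status-badge hunks above: wrapping TASK_STATUS_LABELS[task.status] in t() only works if that constant now holds i18n keys rather than display strings. A rough sketch under that assumption (the key names below are placeholders, not the project's actual translation keys, and only the statuses referenced in this diff are listed):

const TASK_STATUS_LABELS: Record<string, string> = {
  backlog: 'tasks:status.backlog',          // placeholder key
  in_progress: 'tasks:status.inProgress',   // placeholder key
  human_review: 'tasks:status.humanReview', // placeholder key
  done: 'tasks:status.done',                // placeholder key
};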
diff --git a/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx b/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx index 2deab04757..34e5180d97 100644 --- a/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx +++ b/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx @@ -138,10 +138,15 @@ export function TaskMetadata({ task }: TaskMetadataProps) { {/* Description - Primary Content */} {task.description && ( -
- - {task.description} - +
+
+ + {task.description} + +
)} diff --git a/apps/frontend/src/renderer/components/task-detail/hooks/useTaskDetail.ts b/apps/frontend/src/renderer/components/task-detail/hooks/useTaskDetail.ts index 8c1cf219aa..5ff2241254 100644 --- a/apps/frontend/src/renderer/components/task-detail/hooks/useTaskDetail.ts +++ b/apps/frontend/src/renderer/components/task-detail/hooks/useTaskDetail.ts @@ -27,7 +27,7 @@ export function useTaskDetail({ task }: UseTaskDetailOptions) { const [showDiscardDialog, setShowDiscardDialog] = useState(false); const [workspaceError, setWorkspaceError] = useState(null); const [showDiffDialog, setShowDiffDialog] = useState(false); - const [stageOnly, setStageOnly] = useState(task.status === 'human_review'); + const [stageOnly, setStageOnly] = useState(false); // Default to full merge for proper cleanup (fixes #243) const [stagedSuccess, setStagedSuccess] = useState(null); const [stagedProjectPath, setStagedProjectPath] = useState(undefined); const [suggestedCommitMessage, setSuggestedCommitMessage] = useState(undefined); @@ -62,11 +62,27 @@ export function useTaskDetail({ task }: UseTaskDetailOptions) { useEffect(() => { let timeoutId: NodeJS.Timeout | undefined; + // IMPORTANT: If execution phase is 'complete' or 'failed', the task is NOT stuck. + // It means the process has finished and status update is pending. + // This prevents false-positive "stuck" indicators when the process exits normally. + const isPhaseTerminal = executionPhase === 'complete' || executionPhase === 'failed'; + if (isPhaseTerminal) { + setIsStuck(false); + setHasCheckedRunning(true); + return; + } + if (isActiveTask && !hasCheckedRunning) { // Wait 2 seconds before checking - gives process time to spawn and register timeoutId = setTimeout(() => { checkTaskRunning(task.id).then((actuallyRunning) => { - setIsStuck(!actuallyRunning); + // Double-check the phase in case it changed while waiting + const latestPhase = task.executionProgress?.phase; + if (latestPhase === 'complete' || latestPhase === 'failed') { + setIsStuck(false); + } else { + setIsStuck(!actuallyRunning); + } setHasCheckedRunning(true); }); }, 2000); @@ -78,7 +94,7 @@ export function useTaskDetail({ task }: UseTaskDetailOptions) { return () => { if (timeoutId) clearTimeout(timeoutId); }; - }, [task.id, isActiveTask, hasCheckedRunning]); + }, [task.id, isActiveTask, hasCheckedRunning, executionPhase, task.executionProgress?.phase]); // Handle scroll events in logs to detect if user scrolled up const handleLogsScroll = (e: React.UIEvent) => { @@ -222,19 +238,9 @@ export function useTaskDetail({ task }: UseTaskDetailOptions) { } }, [task.id]); - // Auto-load merge preview when worktree is ready (eliminates need to click "Check Conflicts") - // NOTE: This must be placed AFTER loadMergePreview definition since it depends on that callback - useEffect(() => { - // Only auto-load if: - // 1. Task needs review - // 2. Worktree exists - // 3. We haven't already loaded the preview for this task - // 4. We're not currently loading - const alreadyLoaded = hasLoadedPreviewRef.current === task.id; - if (needsReview && worktreeStatus?.exists && !alreadyLoaded && !isLoadingPreview) { - loadMergePreview(); - } - }, [needsReview, worktreeStatus?.exists, isLoadingPreview, task.id, loadMergePreview]); + // NOTE: Merge preview is NO LONGER auto-loaded on modal open. + // User must click "Check for Conflicts" button to trigger the expensive preview operation. + // This improves modal open performance significantly (avoids 1-30+ second Python subprocess). 
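  // Illustrative sketch, not part of this diff: with auto-loading removed, a consumer
  // such as WorkspaceStatus is expected to trigger the preview explicitly, roughly:
  //   <Button onClick={loadMergePreview} disabled={isLoadingPreview}>
  //     Check for Conflicts
  //   </Button>
  // so the expensive merge-preview subprocess only runs when the user asks for it.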
return { // State diff --git a/apps/frontend/src/renderer/components/task-detail/task-review/WorkspaceStatus.tsx b/apps/frontend/src/renderer/components/task-detail/task-review/WorkspaceStatus.tsx index ad286d1019..b474c914b1 100644 --- a/apps/frontend/src/renderer/components/task-detail/task-review/WorkspaceStatus.tsx +++ b/apps/frontend/src/renderer/components/task-detail/task-review/WorkspaceStatus.tsx @@ -367,41 +367,72 @@ export function WorkspaceStatus({ {/* Actions Footer */}
- {/* Stage Only Option */} - + {/* Stage Only Option - only show after conflicts have been checked */} + {mergePreview && ( + + )} {/* Primary Actions */}
- + {/* State 1: No merge preview yet - show "Check for Conflicts" */} + {!mergePreview && !isLoadingPreview && ( + + )} + + {/* State 2: Loading merge preview */} + {isLoadingPreview && ( + + )} + + {/* State 3: Merge preview loaded - show appropriate merge/stage button */} + {mergePreview && !isLoadingPreview && ( + + )} + + + + + + ); +} diff --git a/apps/frontend/src/renderer/components/terminal/TerminalHeader.tsx b/apps/frontend/src/renderer/components/terminal/TerminalHeader.tsx index e96e1fede9..cb0191b05d 100644 --- a/apps/frontend/src/renderer/components/terminal/TerminalHeader.tsx +++ b/apps/frontend/src/renderer/components/terminal/TerminalHeader.tsx @@ -1,11 +1,14 @@ -import { X, Sparkles, TerminalSquare } from 'lucide-react'; -import type { Task } from '../../../shared/types'; +import { X, Sparkles, TerminalSquare, FolderGit, ExternalLink, GripVertical, Maximize2, Minimize2 } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; +import type { SyntheticListenerMap } from '@dnd-kit/core/dist/hooks/utilities'; +import type { Task, TerminalWorktreeConfig } from '../../../shared/types'; import type { TerminalStatus } from '../../stores/terminal-store'; import { Button } from '../ui/button'; import { cn } from '../../lib/utils'; import { STATUS_COLORS } from './types'; import { TerminalTitle } from './TerminalTitle'; import { TaskSelector } from './TaskSelector'; +import { WorktreeSelector } from './WorktreeSelector'; interface TerminalHeaderProps { terminalId: string; @@ -21,6 +24,22 @@ interface TerminalHeaderProps { onClearTask: () => void; onNewTaskClick?: () => void; terminalCount?: number; + /** Worktree configuration if terminal is associated with a worktree */ + worktreeConfig?: TerminalWorktreeConfig; + /** Project path for worktree operations */ + projectPath?: string; + /** Callback to open worktree creation dialog */ + onCreateWorktree?: () => void; + /** Callback when an existing worktree is selected */ + onSelectWorktree?: (config: TerminalWorktreeConfig) => void; + /** Callback to open worktree in IDE */ + onOpenInIDE?: () => void; + /** Drag handle listeners for terminal reordering */ + dragHandleListeners?: SyntheticListenerMap; + /** Whether the terminal is expanded to full view */ + isExpanded?: boolean; + /** Callback to toggle expanded state */ + onToggleExpand?: () => void; } export function TerminalHeader({ @@ -37,12 +56,37 @@ export function TerminalHeader({ onClearTask, onNewTaskClick, terminalCount = 1, + worktreeConfig, + projectPath, + onCreateWorktree, + onSelectWorktree, + onOpenInIDE, + dragHandleListeners, + isExpanded, + onToggleExpand, }: TerminalHeaderProps) { + const { t } = useTranslation(['terminal', 'common']); const backlogTasks = tasks.filter((t) => t.status === 'backlog'); return ( -
+
+ {/* Drag handle - visible on hover */} + {dragHandleListeners && ( +
+ +
+ )}
@@ -69,8 +113,40 @@ export function TerminalHeader({ onNewTaskClick={onNewTaskClick} /> )} + {/* Worktree selector or badge - placed next to task selector */} + {worktreeConfig ? ( + + + {worktreeConfig.name} + + ) : ( + projectPath && onCreateWorktree && onSelectWorktree && ( + + ) + )}
+ {/* Open in IDE button when worktree exists */} + {worktreeConfig && onOpenInIDE && ( + + )} {!isClaudeMode && status !== 'exited' && ( + )} diff --git a/apps/frontend/src/renderer/components/terminal/WorktreeSelector.tsx b/apps/frontend/src/renderer/components/terminal/WorktreeSelector.tsx new file mode 100644 index 0000000000..d9b449e3d0 --- /dev/null +++ b/apps/frontend/src/renderer/components/terminal/WorktreeSelector.tsx @@ -0,0 +1,224 @@ +import { useState, useEffect } from 'react'; +import { FolderGit, Plus, ChevronDown, Loader2, Trash2 } from 'lucide-react'; +import { useTranslation } from 'react-i18next'; +import type { TerminalWorktreeConfig } from '../../../shared/types'; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from '../ui/dropdown-menu'; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from '../ui/alert-dialog'; +import { cn } from '../../lib/utils'; + +interface WorktreeSelectorProps { + terminalId: string; + projectPath: string; + /** Currently attached worktree config, if any */ + currentWorktree?: TerminalWorktreeConfig; + /** Callback to create a new worktree */ + onCreateWorktree: () => void; + /** Callback when an existing worktree is selected */ + onSelectWorktree: (config: TerminalWorktreeConfig) => void; +} + +export function WorktreeSelector({ + terminalId: _terminalId, + projectPath, + currentWorktree, + onCreateWorktree, + onSelectWorktree, +}: WorktreeSelectorProps) { + const { t } = useTranslation(['terminal', 'common']); + const [worktrees, setWorktrees] = useState([]); + const [isLoading, setIsLoading] = useState(false); + const [isOpen, setIsOpen] = useState(false); + const [deleteWorktree, setDeleteWorktree] = useState(null); + const [isDeleting, setIsDeleting] = useState(false); + + // Fetch worktrees when dropdown opens + const fetchWorktrees = async () => { + if (!projectPath) return; + setIsLoading(true); + try { + const result = await window.electronAPI.listTerminalWorktrees(projectPath); + if (result.success && result.data) { + // Filter out the current worktree from the list + const available = currentWorktree + ? 
result.data.filter((wt) => wt.name !== currentWorktree.name) + : result.data; + setWorktrees(available); + } + } catch (err) { + console.error('Failed to fetch worktrees:', err); + } finally { + setIsLoading(false); + } + }; + + useEffect(() => { + if (isOpen && projectPath) { + fetchWorktrees(); + } + }, [isOpen, projectPath, currentWorktree]); + + // Handle delete worktree + const handleDeleteWorktree = async () => { + if (!deleteWorktree || !projectPath) return; + setIsDeleting(true); + try { + const result = await window.electronAPI.removeTerminalWorktree( + projectPath, + deleteWorktree.name, + deleteWorktree.hasGitBranch // Delete the branch too if it was created + ); + if (result.success) { + // Refresh the list + await fetchWorktrees(); + } else { + console.error('Failed to delete worktree:', result.error); + } + } catch (err) { + console.error('Failed to delete worktree:', err); + } finally { + setIsDeleting(false); + setDeleteWorktree(null); + } + }; + + // If terminal already has a worktree, show worktree badge (handled in TerminalHeader) + // This component only shows when there's no worktree attached + + return ( + <> + + + + + + {/* New Worktree - always at top */} + { + e.stopPropagation(); + setIsOpen(false); + onCreateWorktree(); + }} + className="text-xs text-amber-500" + > + + {t('terminal:worktree.createNew')} + + + {/* Separator and existing worktrees */} + {isLoading ? ( + <> + +
+ +
+ + ) : worktrees.length > 0 ? ( + <> + +
+ {t('terminal:worktree.existing')} +
+ {worktrees.map((wt) => ( + { + e.stopPropagation(); + setIsOpen(false); + onSelectWorktree(wt); + }} + className="text-xs group" + > + +
+ {wt.name} + {wt.branchName && ( + + {wt.branchName} + + )} +
+ +
+ ))} + + ) : null} +
+
+ + {/* Delete Confirmation Dialog */} + !open && setDeleteWorktree(null)}> + + + {t('terminal:worktree.deleteTitle', 'Delete Worktree?')} + + {t('terminal:worktree.deleteDescription', 'This will permanently delete the worktree and its branch. Any uncommitted changes will be lost.')} + {deleteWorktree && ( + + {deleteWorktree.name} + {deleteWorktree.branchName && ( + ({deleteWorktree.branchName}) + )} + + )} + + + + {t('common:cancel')} + + {isDeleting ? ( + <> + + {t('common:deleting', 'Deleting...')} + + ) : ( + <> + + {t('common:delete')} + + )} + + + + + + ); +} diff --git a/apps/frontend/src/renderer/components/terminal/types.ts b/apps/frontend/src/renderer/components/terminal/types.ts index fe3cee083d..aa965d132f 100644 --- a/apps/frontend/src/renderer/components/terminal/types.ts +++ b/apps/frontend/src/renderer/components/terminal/types.ts @@ -1,3 +1,4 @@ +import type { SyntheticListenerMap } from '@dnd-kit/core/dist/hooks/utilities'; import type { Task, ExecutionPhase } from '../../../shared/types'; import type { TerminalStatus } from '../../stores/terminal-store'; import { Circle, Search, Code2, Wrench, CheckCircle2, AlertCircle } from 'lucide-react'; @@ -12,6 +13,14 @@ export interface TerminalProps { tasks?: Task[]; onNewTaskClick?: () => void; terminalCount?: number; + /** Drag handle listeners from useSortable for terminal reordering */ + dragHandleListeners?: SyntheticListenerMap; + /** Whether this terminal is currently being dragged */ + isDragging?: boolean; + /** Whether the terminal is expanded to full view */ + isExpanded?: boolean; + /** Callback to toggle expanded state */ + onToggleExpand?: () => void; } /** @@ -19,11 +28,11 @@ export interface TerminalProps { * More terminals = narrower title to fit all elements. */ export function getTitleMaxWidthClass(terminalCount: number): string { - if (terminalCount <= 2) return 'max-w-64'; // 256px - large - if (terminalCount <= 4) return 'max-w-48'; // 192px - medium - if (terminalCount <= 6) return 'max-w-40'; // 160px - default - if (terminalCount <= 9) return 'max-w-32'; // 128px - compact - return 'max-w-24'; // 96px - very compact for 10-12 terminals + if (terminalCount <= 2) return 'max-w-72'; // 288px - large + if (terminalCount <= 4) return 'max-w-56'; // 224px - medium + if (terminalCount <= 6) return 'max-w-48'; // 192px - default + if (terminalCount <= 9) return 'max-w-40'; // 160px - compact + return 'max-w-36'; // 144px - compact for 10-12 terminals } export const STATUS_COLORS: Record = { diff --git a/apps/frontend/src/renderer/components/terminal/useAutoNaming.ts b/apps/frontend/src/renderer/components/terminal/useAutoNaming.ts index 1b1eb992ce..d9b1122b11 100644 --- a/apps/frontend/src/renderer/components/terminal/useAutoNaming.ts +++ b/apps/frontend/src/renderer/components/terminal/useAutoNaming.ts @@ -62,6 +62,8 @@ export function useAutoNaming({ terminalId, cwd }: UseAutoNamingOptions) { const result = await window.electronAPI.generateTerminalName(command, terminal?.cwd || cwd); if (result.success && result.data) { updateTerminal(terminalId, { title: result.data }); + // Sync to main process so title persists across hot reloads + window.electronAPI.setTerminalTitle(terminalId, result.data); } } catch (error) { console.warn('[Terminal] Auto-naming failed:', error); diff --git a/apps/frontend/src/renderer/components/terminal/usePtyProcess.ts b/apps/frontend/src/renderer/components/terminal/usePtyProcess.ts index dcd516383a..2b40fe4a73 100644 --- 
a/apps/frontend/src/renderer/components/terminal/usePtyProcess.ts +++ b/apps/frontend/src/renderer/components/terminal/usePtyProcess.ts @@ -1,4 +1,4 @@ -import { useEffect, useRef } from 'react'; +import { useEffect, useRef, useCallback, useState, type RefObject } from 'react'; import { useTerminalStore } from '../../stores/terminal-store'; interface UsePtyProcessOptions { @@ -7,6 +7,10 @@ interface UsePtyProcessOptions { projectPath?: string; cols: number; rows: number; + skipCreation?: boolean; // Skip PTY creation until dimensions are ready + // Track deliberate recreation scenarios (e.g., worktree switching) + // When true, resets terminal status to 'idle' to allow proper recreation + isRecreatingRef?: RefObject; onCreated?: () => void; onError?: (error: string) => void; } @@ -17,22 +21,59 @@ export function usePtyProcess({ projectPath, cols, rows, + skipCreation = false, + isRecreatingRef, onCreated, onError, }: UsePtyProcessOptions) { const isCreatingRef = useRef(false); const isCreatedRef = useRef(false); - const setTerminalStatus = useTerminalStore((state) => state.setTerminalStatus); - const updateTerminal = useTerminalStore((state) => state.updateTerminal); + const currentCwdRef = useRef(cwd); + // Trigger state to force re-creation after resetForRecreate() + // Refs don't trigger re-renders, so we need a state to ensure the effect runs + const [recreationTrigger, setRecreationTrigger] = useState(0); + + // Use getState() pattern for store actions to avoid React Fast Refresh issues + // The selectors like useTerminalStore((state) => state.setTerminalStatus) can fail + // during HMR with "Should have a queue" errors. Using getState() in callbacks + // avoids this by not relying on React's hook queue mechanism. + const getStore = useCallback(() => useTerminalStore.getState(), []); + + // Track cwd changes - if cwd changes while terminal exists, trigger recreate + useEffect(() => { + if (currentCwdRef.current !== cwd) { + // Only reset if we're not already in a controlled recreation process. + // prepareForRecreate() sets isCreatingRef=true to prevent auto-recreation + // while awaiting destroyTerminal(). Without this check, we'd reset isCreatingRef + // back to false before destroyTerminal completes, causing a race condition + // where a new PTY is created before the old one is destroyed. 
+ if (isCreatedRef.current && !isCreatingRef.current) { + // Terminal exists and we're not in a controlled recreation, reset refs + isCreatedRef.current = false; + } + currentCwdRef.current = cwd; + } + }, [cwd]); // Create PTY process + // recreationTrigger is included to force the effect to run after resetForRecreate() + // since refs don't trigger re-renders useEffect(() => { + // Skip creation if explicitly told to (waiting for dimensions) + if (skipCreation) return; if (isCreatingRef.current || isCreatedRef.current) return; - const terminalState = useTerminalStore.getState().terminals.find((t) => t.id === terminalId); + const store = getStore(); + const terminalState = store.terminals.find((t) => t.id === terminalId); const alreadyRunning = terminalState?.status === 'running' || terminalState?.status === 'claude-active'; const isRestored = terminalState?.isRestored; + // When recreating (e.g., worktree switching), reset status from 'exited' to 'idle' + // This allows proper recreation after deliberate terminal destruction + if (isRecreatingRef?.current && terminalState?.status === 'exited') { + store.setTerminalStatus(terminalId, 'idle'); + } + isCreatingRef.current = true; if (isRestored && terminalState) { @@ -47,22 +88,37 @@ export function usePtyProcess({ claudeSessionId: terminalState.claudeSessionId, outputBuffer: '', createdAt: terminalState.createdAt.toISOString(), - lastActiveAt: new Date().toISOString() + lastActiveAt: new Date().toISOString(), + // Pass worktreeConfig so backend can restore it and persist correctly + worktreeConfig: terminalState.worktreeConfig, }, cols, rows ).then((result) => { if (result.success && result.data?.success) { isCreatedRef.current = true; - setTerminalStatus(terminalId, terminalState.isClaudeMode ? 'claude-active' : 'running'); - updateTerminal(terminalId, { isRestored: false }); + // Clear recreation flag after successful PTY creation + if (isRecreatingRef?.current) { + isRecreatingRef.current = false; + } + const store = getStore(); + store.setTerminalStatus(terminalId, terminalState.isClaudeMode ? 
'claude-active' : 'running'); + store.updateTerminal(terminalId, { isRestored: false }); onCreated?.(); } else { const error = `Error restoring session: ${result.data?.error || result.error}`; + // Clear recreation flag on failure to prevent terminal from being stuck + if (isRecreatingRef?.current) { + isRecreatingRef.current = false; + } onError?.(error); } isCreatingRef.current = false; }).catch((err) => { + // Clear recreation flag on failure to prevent terminal from being stuck + if (isRecreatingRef?.current) { + isRecreatingRef.current = false; + } onError?.(err.message); isCreatingRef.current = false; }); @@ -77,22 +133,53 @@ export function usePtyProcess({ }).then((result) => { if (result.success) { isCreatedRef.current = true; + // Clear recreation flag after successful PTY creation + if (isRecreatingRef?.current) { + isRecreatingRef.current = false; + } if (!alreadyRunning) { - setTerminalStatus(terminalId, 'running'); + getStore().setTerminalStatus(terminalId, 'running'); } onCreated?.(); } else { + // Clear recreation flag on failure to prevent terminal from being stuck + if (isRecreatingRef?.current) { + isRecreatingRef.current = false; + } onError?.(result.error || 'Unknown error'); } isCreatingRef.current = false; }).catch((err) => { + // Clear recreation flag on failure to prevent terminal from being stuck + if (isRecreatingRef?.current) { + isRecreatingRef.current = false; + } onError?.(err.message); isCreatingRef.current = false; }); } - }, [terminalId, cwd, projectPath, cols, rows, setTerminalStatus, updateTerminal, onCreated, onError]); + + }, [terminalId, cwd, projectPath, cols, rows, skipCreation, recreationTrigger, getStore, onCreated, onError]); + + // Function to prepare for recreation by preventing the effect from running + // Call this BEFORE updating the store cwd to avoid race condition + const prepareForRecreate = useCallback(() => { + isCreatingRef.current = true; + }, []); + + // Function to reset refs and allow recreation + // Call this AFTER destroying the old terminal + // Increments recreationTrigger to force the effect to run since refs don't trigger re-renders + const resetForRecreate = useCallback(() => { + isCreatedRef.current = false; + isCreatingRef.current = false; + // Increment trigger to force the creation effect to run + setRecreationTrigger((prev) => prev + 1); + }, []); return { isCreated: isCreatedRef.current, + prepareForRecreate, + resetForRecreate, }; } diff --git a/apps/frontend/src/renderer/components/terminal/useTerminalEvents.ts b/apps/frontend/src/renderer/components/terminal/useTerminalEvents.ts index a12a400bbf..60e60d9f75 100644 --- a/apps/frontend/src/renderer/components/terminal/useTerminalEvents.ts +++ b/apps/frontend/src/renderer/components/terminal/useTerminalEvents.ts @@ -1,9 +1,12 @@ -import { useEffect, useRef } from 'react'; +import { useEffect, useRef, type RefObject } from 'react'; import { useTerminalStore } from '../../stores/terminal-store'; import { terminalBufferManager } from '../../lib/terminal-buffer-manager'; interface UseTerminalEventsOptions { terminalId: string; + // Track deliberate recreation scenarios (e.g., worktree switching) + // When true, skips auto-removal to allow proper recreation + isRecreatingRef?: RefObject; onOutput?: (data: string) => void; onExit?: (exitCode: number) => void; onTitleChange?: (title: string) => void; @@ -12,6 +15,7 @@ interface UseTerminalEventsOptions { export function useTerminalEvents({ terminalId, + isRecreatingRef, onOutput, onExit, onTitleChange, @@ -58,8 +62,40 @@ 
export function useTerminalEvents({ useEffect(() => { const cleanup = window.electronAPI.onTerminalExit((id, exitCode) => { if (id === terminalId) { - useTerminalStore.getState().setTerminalStatus(terminalId, 'exited'); + // During deliberate recreation (e.g., worktree switching), skip the normal + // exit handling to prevent setting status to 'exited' and scheduling removal. + // The recreation flow will handle status transitions. + if (isRecreatingRef?.current) { + onExitRef.current?.(exitCode); + return; + } + + const store = useTerminalStore.getState(); + store.setTerminalStatus(terminalId, 'exited'); + // Reset Claude mode when terminal exits - the Claude process has ended + // Use updateTerminal instead of setClaudeMode to avoid changing status back to 'running' + const terminal = store.getTerminal(terminalId); + if (terminal?.isClaudeMode) { + store.updateTerminal(terminalId, { isClaudeMode: false }); + } onExitRef.current?.(exitCode); + + // Auto-remove exited terminals from store after a short delay + // This prevents them from counting toward the max terminal limit + // and ensures they don't get persisted and restored on next launch + setTimeout(() => { + const currentStore = useTerminalStore.getState(); + const currentTerminal = currentStore.getTerminal(terminalId); + // Only remove if still exited (user hasn't recreated it) + if (currentTerminal?.status === 'exited') { + // First call destroyTerminal to clean up persisted session on disk + // (the PTY is already dead, but this ensures session removal) + window.electronAPI.destroyTerminal(terminalId).catch(() => { + // Ignore errors - PTY may already be gone + }); + currentStore.removeTerminal(terminalId); + } + }, 2000); // 2 second delay to show exit message } }); @@ -82,7 +118,11 @@ export function useTerminalEvents({ useEffect(() => { const cleanup = window.electronAPI.onTerminalClaudeSession((id, sessionId) => { if (id === terminalId) { - useTerminalStore.getState().setClaudeSessionId(terminalId, sessionId); + const store = useTerminalStore.getState(); + store.setClaudeSessionId(terminalId, sessionId); + // Also set Claude mode to true when we receive a session ID + // This ensures the Claude badge shows up after auto-resume + store.setClaudeMode(terminalId, true); console.warn('[Terminal] Captured Claude session ID:', sessionId); onClaudeSessionRef.current?.(sessionId); } @@ -90,4 +130,26 @@ export function useTerminalEvents({ return cleanup; }, [terminalId]); + + // Handle Claude busy state changes (for visual indicator) + useEffect(() => { + const cleanup = window.electronAPI.onTerminalClaudeBusy((id, isBusy) => { + if (id === terminalId) { + useTerminalStore.getState().setClaudeBusy(terminalId, isBusy); + } + }); + + return cleanup; + }, [terminalId]); + + // Handle pending Claude resume notification (for deferred resume on tab activation) + useEffect(() => { + const cleanup = window.electronAPI.onTerminalPendingResume((id, _sessionId) => { + if (id === terminalId) { + useTerminalStore.getState().setPendingClaudeResume(terminalId, true); + } + }); + + return cleanup; + }, [terminalId]); } diff --git a/apps/frontend/src/renderer/components/terminal/useXterm.ts b/apps/frontend/src/renderer/components/terminal/useXterm.ts index 9da5471a6e..87c0a7cd68 100644 --- a/apps/frontend/src/renderer/components/terminal/useXterm.ts +++ b/apps/frontend/src/renderer/components/terminal/useXterm.ts @@ -1,4 +1,4 @@ -import { useEffect, useRef, useCallback } from 'react'; +import { useEffect, useRef, useCallback, useState } from 'react'; 
import { Terminal as XTerm } from '@xterm/xterm'; import { FitAddon } from '@xterm/addon-fit'; import { WebLinksAddon } from '@xterm/addon-web-links'; @@ -9,15 +9,27 @@ interface UseXtermOptions { terminalId: string; onCommandEnter?: (command: string) => void; onResize?: (cols: number, rows: number) => void; + onDimensionsReady?: (cols: number, rows: number) => void; } -export function useXterm({ terminalId, onCommandEnter, onResize }: UseXtermOptions) { +// Debounce helper function +function debounce void>(fn: T, ms: number): T { + let timeoutId: ReturnType | null = null; + return ((...args: unknown[]) => { + if (timeoutId) clearTimeout(timeoutId); + timeoutId = setTimeout(() => fn(...args), ms); + }) as T; +} + +export function useXterm({ terminalId, onCommandEnter, onResize, onDimensionsReady }: UseXtermOptions) { const terminalRef = useRef(null); const xtermRef = useRef(null); const fitAddonRef = useRef(null); const serializeAddonRef = useRef(null); const commandBufferRef = useRef(''); const isDisposedRef = useRef(false); + const dimensionsReadyCalledRef = useRef(false); + const [dimensions, setDimensions] = useState<{ cols: number; rows: number }>({ cols: 80, rows: 24 }); // Initialize xterm.js UI useEffect(() => { @@ -73,6 +85,22 @@ export function useXterm({ terminalId, onCommandEnter, onResize }: UseXtermOptio xterm.attachCustomKeyEventHandler((event) => { const isMod = event.metaKey || event.ctrlKey; + // Handle SHIFT+Enter for multi-line input (send newline character) + // This matches VS Code/Cursor behavior for multi-line input in Claude Code + if (event.key === 'Enter' && event.shiftKey && !isMod && event.type === 'keydown') { + // Send ESC + newline - same as OPTION+Enter which works for multi-line + xterm.input('\x1b\n'); + return false; // Prevent default xterm handling + } + + // Handle CMD+Backspace (Mac) or Ctrl+Backspace (Windows/Linux) to delete line + // Sends Ctrl+U which is the terminal standard for "kill line backward" + const isDeleteLine = event.key === 'Backspace' && event.type === 'keydown' && isMod; + if (isDeleteLine) { + xterm.input('\x15'); // Ctrl+U + return false; + } + // Let Cmd/Ctrl + number keys pass through for project tab switching if (isMod && event.key >= '1' && event.key <= '9') { return false; // Don't handle in xterm, let it bubble up @@ -93,14 +121,36 @@ export function useXterm({ terminalId, onCommandEnter, onResize }: UseXtermOptio return true; }); - setTimeout(() => { - fitAddon.fit(); - }, 50); - xtermRef.current = xterm; fitAddonRef.current = fitAddon; serializeAddonRef.current = serializeAddon; + // Use requestAnimationFrame to wait for layout, then fit + // This is more reliable than a fixed timeout + const performInitialFit = () => { + requestAnimationFrame(() => { + if (fitAddonRef.current && xtermRef.current && terminalRef.current) { + // Check if container has valid dimensions + const rect = terminalRef.current.getBoundingClientRect(); + if (rect.width > 0 && rect.height > 0) { + fitAddonRef.current.fit(); + const cols = xtermRef.current.cols; + const rows = xtermRef.current.rows; + setDimensions({ cols, rows }); + // Call onDimensionsReady once when we have valid dimensions + if (!dimensionsReadyCalledRef.current && cols > 0 && rows > 0) { + dimensionsReadyCalledRef.current = true; + onDimensionsReady?.(cols, rows); + } + } else { + // Container not ready yet, retry after a short delay + setTimeout(performInitialFit, 50); + } + } + }); + }; + performInitialFit(); + // Replay buffered output if this is a remount or restored session 
// This now includes ANSI codes for proper formatting/colors/prompt const bufferedOutput = terminalBufferManager.get(terminalId); @@ -140,23 +190,36 @@ export function useXterm({ terminalId, onCommandEnter, onResize }: UseXtermOptio return () => { // Cleanup handled by parent component }; - }, [terminalId, onCommandEnter, onResize]); + }, [terminalId, onCommandEnter, onResize, onDimensionsReady]); - // Handle resize on container resize + // Handle resize on container resize with debouncing useEffect(() => { - const handleResize = () => { - if (fitAddonRef.current && xtermRef.current) { - fitAddonRef.current.fit(); + const handleResize = debounce(() => { + if (fitAddonRef.current && xtermRef.current && terminalRef.current) { + // Check if container has valid dimensions before fitting + const rect = terminalRef.current.getBoundingClientRect(); + if (rect.width > 0 && rect.height > 0) { + fitAddonRef.current.fit(); + const cols = xtermRef.current.cols; + const rows = xtermRef.current.rows; + setDimensions({ cols, rows }); + // Notify when dimensions become valid (for late PTY creation) + if (!dimensionsReadyCalledRef.current && cols > 0 && rows > 0) { + dimensionsReadyCalledRef.current = true; + onDimensionsReady?.(cols, rows); + } + } } - }; + }, 100); // 100ms debounce to prevent layout thrashing - const container = terminalRef.current?.parentElement; + // Observe the terminalRef directly (not parent) for accurate resize detection + const container = terminalRef.current; if (container) { const resizeObserver = new ResizeObserver(handleResize); resizeObserver.observe(container); return () => resizeObserver.disconnect(); } - }, []); + }, [onDimensionsReady]); const fit = useCallback(() => { if (fitAddonRef.current && xtermRef.current) { @@ -227,7 +290,8 @@ export function useXterm({ terminalId, onCommandEnter, onResize }: UseXtermOptio writeln, focus, dispose, - cols: xtermRef.current?.cols || 80, - rows: xtermRef.current?.rows || 24, + cols: dimensions.cols, + rows: dimensions.rows, + dimensionsReady: dimensionsReadyCalledRef.current, }; } diff --git a/apps/frontend/src/renderer/components/ui/error-boundary.tsx b/apps/frontend/src/renderer/components/ui/error-boundary.tsx index 4ee32d69aa..152e663f40 100644 --- a/apps/frontend/src/renderer/components/ui/error-boundary.tsx +++ b/apps/frontend/src/renderer/components/ui/error-boundary.tsx @@ -2,6 +2,7 @@ import React from 'react'; import { AlertTriangle, RefreshCw } from 'lucide-react'; import { Button } from './button'; import { Card, CardContent } from './card'; +import { captureException } from '../../lib/sentry'; interface ErrorBoundaryProps { children: React.ReactNode; @@ -30,6 +31,11 @@ export class ErrorBoundary extends React.Component { diff --git a/apps/frontend/src/renderer/components/ui/scroll-area.tsx b/apps/frontend/src/renderer/components/ui/scroll-area.tsx index 5eaf1be985..2ca201e79d 100644 --- a/apps/frontend/src/renderer/components/ui/scroll-area.tsx +++ b/apps/frontend/src/renderer/components/ui/scroll-area.tsx @@ -4,14 +4,18 @@ import { cn } from '../../lib/utils'; const ScrollArea = React.forwardRef< React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( + React.ComponentPropsWithoutRef & { + viewportClassName?: string; +} +>(({ className, children, viewportClassName, ...props }, ref) => ( - + {children} diff --git a/apps/frontend/src/renderer/components/ui/toast.tsx b/apps/frontend/src/renderer/components/ui/toast.tsx new file mode 100644 index 0000000000..5e5a9aaa0f --- 
/dev/null +++ b/apps/frontend/src/renderer/components/ui/toast.tsx @@ -0,0 +1,130 @@ +/** + * Toast UI Components + * + * Based on Radix UI Toast for non-intrusive notifications. + */ +import * as React from 'react'; +import * as ToastPrimitives from '@radix-ui/react-toast'; +import { cva, type VariantProps } from 'class-variance-authority'; +import { X } from 'lucide-react'; + +import { cn } from '../../lib/utils'; + +const ToastProvider = ToastPrimitives.Provider; + +const ToastViewport = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +ToastViewport.displayName = ToastPrimitives.Viewport.displayName; + +const toastVariants = cva( + 'group pointer-events-auto relative flex w-full items-center justify-between space-x-4 overflow-hidden rounded-md border p-6 pr-8 shadow-lg transition-all data-[swipe=cancel]:translate-x-0 data-[swipe=end]:translate-x-[var(--radix-toast-swipe-end-x)] data-[swipe=move]:translate-x-[var(--radix-toast-swipe-move-x)] data-[swipe=move]:transition-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[swipe=end]:animate-out data-[state=closed]:fade-out-80 data-[state=closed]:slide-out-to-right-full data-[state=open]:slide-in-from-top-full data-[state=open]:sm:slide-in-from-bottom-full', + { + variants: { + variant: { + default: 'border bg-card text-foreground', + destructive: 'destructive group border-destructive bg-destructive text-destructive-foreground', + }, + }, + defaultVariants: { + variant: 'default', + }, + } +); + +const Toast = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef & VariantProps +>(({ className, variant, ...props }, ref) => { + return ( + + ); +}); +Toast.displayName = ToastPrimitives.Root.displayName; + +const ToastAction = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +ToastAction.displayName = ToastPrimitives.Action.displayName; + +const ToastClose = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)); +ToastClose.displayName = ToastPrimitives.Close.displayName; + +const ToastTitle = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +ToastTitle.displayName = ToastPrimitives.Title.displayName; + +const ToastDescription = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +ToastDescription.displayName = ToastPrimitives.Description.displayName; + +type ToastProps = React.ComponentPropsWithoutRef; + +type ToastActionElement = React.ReactElement; + +export { + type ToastProps, + type ToastActionElement, + ToastProvider, + ToastViewport, + Toast, + ToastTitle, + ToastDescription, + ToastClose, + ToastAction, +}; diff --git a/apps/frontend/src/renderer/components/ui/toaster.tsx b/apps/frontend/src/renderer/components/ui/toaster.tsx new file mode 100644 index 0000000000..232372ca48 --- /dev/null +++ b/apps/frontend/src/renderer/components/ui/toaster.tsx @@ -0,0 +1,39 @@ +/** + * Toaster Component + * + * Renders the toast viewport where toasts are displayed. + * Should be included once in the app root. 
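As a minimal usage sketch (the surrounding App markup is assumed, not shown in this diff), mounting it once near the root makes every toast() call render into the shared viewport:

    import * as React from 'react';
    import { Toaster } from './components/ui/toaster'; // path depends on where the root component lives

    export function AppRoot({ children }: { children: React.ReactNode }) {
      return (
        <>
          {children}
          <Toaster />
        </>
      );
    }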
+ */ +import { + Toast, + ToastClose, + ToastDescription, + ToastProvider, + ToastTitle, + ToastViewport, +} from './toast'; +import { useToast } from '../../hooks/use-toast'; + +export function Toaster() { + const { toasts } = useToast(); + + return ( + + {toasts.map(function ({ id, title, description, action, ...props }) { + return ( + +
+ {title && <ToastTitle>{title}</ToastTitle>} + {description && ( + <ToastDescription>{description}</ToastDescription> + )} + </div>
+ {action} + <ToastClose /> + </Toast>
+ ); + })} + <ToastViewport /> + </ToastProvider>
+ ); +} diff --git a/apps/frontend/src/renderer/hooks/index.ts b/apps/frontend/src/renderer/hooks/index.ts index 0793d0458d..21f70a6ac9 100644 --- a/apps/frontend/src/renderer/hooks/index.ts +++ b/apps/frontend/src/renderer/hooks/index.ts @@ -1,3 +1,4 @@ // Export all custom hooks export { useIpcListeners } from './useIpc'; export { useVirtualizedTree } from './useVirtualizedTree'; +export { useClaudeLoginTerminal } from './useClaudeLoginTerminal'; diff --git a/apps/frontend/src/renderer/hooks/use-toast.ts b/apps/frontend/src/renderer/hooks/use-toast.ts new file mode 100644 index 0000000000..302de84519 --- /dev/null +++ b/apps/frontend/src/renderer/hooks/use-toast.ts @@ -0,0 +1,192 @@ +/** + * Toast Hook + * + * Manages toast state for displaying notifications. + */ +import * as React from 'react'; + +import type { ToastActionElement, ToastProps } from '../components/ui/toast'; + +const TOAST_LIMIT = 1; +const TOAST_REMOVE_DELAY = 1000000; + +type ToasterToast = ToastProps & { + id: string; + title?: React.ReactNode; + description?: React.ReactNode; + action?: ToastActionElement; +}; + +const actionTypes = { + ADD_TOAST: 'ADD_TOAST', + UPDATE_TOAST: 'UPDATE_TOAST', + DISMISS_TOAST: 'DISMISS_TOAST', + REMOVE_TOAST: 'REMOVE_TOAST', +} as const; + +let count = 0; + +function genId() { + count = (count + 1) % Number.MAX_SAFE_INTEGER; + return count.toString(); +} + +type ActionType = typeof actionTypes; + +type Action = + | { + type: ActionType['ADD_TOAST']; + toast: ToasterToast; + } + | { + type: ActionType['UPDATE_TOAST']; + toast: Partial; + } + | { + type: ActionType['DISMISS_TOAST']; + toastId?: ToasterToast['id']; + } + | { + type: ActionType['REMOVE_TOAST']; + toastId?: ToasterToast['id']; + }; + +interface State { + toasts: ToasterToast[]; +} + +const toastTimeouts = new Map>(); + +const addToRemoveQueue = (toastId: string) => { + if (toastTimeouts.has(toastId)) { + return; + } + + const timeout = setTimeout(() => { + toastTimeouts.delete(toastId); + dispatch({ + type: 'REMOVE_TOAST', + toastId: toastId, + }); + }, TOAST_REMOVE_DELAY); + + toastTimeouts.set(toastId, timeout); +}; + +export const reducer = (state: State, action: Action): State => { + switch (action.type) { + case 'ADD_TOAST': + return { + ...state, + toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT), + }; + + case 'UPDATE_TOAST': + return { + ...state, + toasts: state.toasts.map((t) => + t.id === action.toast.id ? { ...t, ...action.toast } : t + ), + }; + + case 'DISMISS_TOAST': { + const { toastId } = action; + + if (toastId) { + addToRemoveQueue(toastId); + } else { + state.toasts.forEach((toast) => { + addToRemoveQueue(toast.id); + }); + } + + return { + ...state, + toasts: state.toasts.map((t) => + t.id === toastId || toastId === undefined + ? 
{ + ...t, + open: false, + } + : t + ), + }; + } + case 'REMOVE_TOAST': + if (action.toastId === undefined) { + return { + ...state, + toasts: [], + }; + } + return { + ...state, + toasts: state.toasts.filter((t) => t.id !== action.toastId), + }; + } +}; + +const listeners: Array<(state: State) => void> = []; + +let memoryState: State = { toasts: [] }; + +function dispatch(action: Action) { + memoryState = reducer(memoryState, action); + listeners.forEach((listener) => { + listener(memoryState); + }); +} + +type Toast = Omit; + +function toast({ ...props }: Toast) { + const id = genId(); + + const update = (props: ToasterToast) => + dispatch({ + type: 'UPDATE_TOAST', + toast: { ...props, id }, + }); + + const dismiss = () => dispatch({ type: 'DISMISS_TOAST', toastId: id }); + + dispatch({ + type: 'ADD_TOAST', + toast: { + ...props, + id, + open: true, + onOpenChange: (open) => { + if (!open) dismiss(); + }, + }, + }); + + return { + id: id, + dismiss, + update, + }; +} + +function useToast() { + const [state, setState] = React.useState(memoryState); + + React.useEffect(() => { + listeners.push(setState); + return () => { + const index = listeners.indexOf(setState); + if (index > -1) { + listeners.splice(index, 1); + } + }; + }, []); + + return { + ...state, + toast, + dismiss: (toastId?: string) => dispatch({ type: 'DISMISS_TOAST', toastId }), + }; +} + +export { useToast, toast }; diff --git a/apps/frontend/src/renderer/hooks/useClaudeLoginTerminal.ts b/apps/frontend/src/renderer/hooks/useClaudeLoginTerminal.ts new file mode 100644 index 0000000000..616dfb26bb --- /dev/null +++ b/apps/frontend/src/renderer/hooks/useClaudeLoginTerminal.ts @@ -0,0 +1,37 @@ +import { useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useTerminalStore } from '../stores/terminal-store'; +import { toast } from './use-toast'; + +/** + * Custom hook to handle Claude profile login terminal visibility. + * Listens for onTerminalAuthCreated events and adds the terminal + * to the store so users can see the OAuth flow output. 
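Because use-toast keeps its state in a module-level store, toast() can be called imperatively from code that has no React context (stores, IPC handlers) while components subscribe via useToast(); a small sketch of that calling pattern, with illustrative values:

    import { toast, useToast } from '../hooks/use-toast'; // path relative to the caller

    // Imperative call from non-component code, e.g. a zustand store action:
    const { dismiss } = toast({
      title: 'Connection successful',
      description: 'Your API credentials are valid.',
    });
    // dismiss() hides it early; otherwise onOpenChange(false) dispatches DISMISS_TOAST.

    // Inside a component, useToast() re-renders whenever the module-level state changes:
    function ToastCount() {
      const { toasts } = useToast();
      return <span>{toasts.length}</span>;
    }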
+ */ +export function useClaudeLoginTerminal() { + const { t } = useTranslation('terminal'); + const addExternalTerminal = useTerminalStore((state) => state.addExternalTerminal); + + useEffect(() => { + const unsubscribe = window.electronAPI.onTerminalAuthCreated((info) => { + // Add the terminal to the store so it becomes visible in the UI + // This allows users to see the 'claude setup-token' output and complete the OAuth flow + // cwd is optional and defaults to HOME or '~' in addExternalTerminal + const terminal = addExternalTerminal( + info.terminalId, + t('auth.terminalTitle', { profileName: info.profileName }) + ); + + // If terminal creation failed (max terminals reached), show a notification + // The terminal was created in main process but we can't show it in UI + if (!terminal) { + toast({ + title: t('auth.maxTerminalsReached'), + variant: 'destructive', + }); + } + }); + + return unsubscribe; + }, [addExternalTerminal, t]); +} diff --git a/apps/frontend/src/renderer/hooks/useIpc.ts b/apps/frontend/src/renderer/hooks/useIpc.ts index 7e0f2f5e22..6539dd01b7 100644 --- a/apps/frontend/src/renderer/hooks/useIpc.ts +++ b/apps/frontend/src/renderer/hooks/useIpc.ts @@ -3,6 +3,7 @@ import { unstable_batchedUpdates } from 'react-dom'; import { useTaskStore } from '../stores/task-store'; import { useRoadmapStore } from '../stores/roadmap-store'; import { useRateLimitStore } from '../stores/rate-limit-store'; +import { useProjectStore } from '../stores/project-store'; import type { ImplementationPlan, TaskStatus, RoadmapGenerationStatus, Roadmap, ExecutionProgress, RateLimitInfo, SDKRateLimitInfo } from '../../shared/types'; /** @@ -111,6 +112,21 @@ function queueUpdate(taskId: string, update: BatchedUpdate): void { } } +/** + * Check if a task event is for the currently selected project. + * This prevents multi-project interference where events from one project's + * running task incorrectly update another project's task state (issue #723). + * Handles backward compatibility and no-project-selected cases. 
+ */ +function isTaskForCurrentProject(eventProjectId?: string): boolean { + // If no projectId provided (backward compatibility), accept the event + if (!eventProjectId) return true; + const currentProjectId = useProjectStore.getState().selectedProjectId; + // If no project selected, accept the event + if (!currentProjectId) return true; + return currentProjectId === eventProjectId; +} + /** * Hook to set up IPC event listeners for task updates */ @@ -129,13 +145,17 @@ export function useIpcListeners(): void { useEffect(() => { // Set up listeners with batched updates const cleanupProgress = window.electronAPI.onTaskProgress( - (taskId: string, plan: ImplementationPlan) => { + (taskId: string, plan: ImplementationPlan, projectId?: string) => { + // Filter by project to prevent multi-project interference + if (!isTaskForCurrentProject(projectId)) return; queueUpdate(taskId, { plan }); } ); const cleanupError = window.electronAPI.onTaskError( - (taskId: string, error: string) => { + (taskId: string, error: string, projectId?: string) => { + // Filter by project to prevent multi-project interference (issue #723) + if (!isTaskForCurrentProject(projectId)) return; // Errors are not batched - show immediately setError(`Task ${taskId}: ${error}`); appendLog(taskId, `[ERROR] ${error}`); @@ -143,20 +163,28 @@ export function useIpcListeners(): void { ); const cleanupLog = window.electronAPI.onTaskLog( - (taskId: string, log: string) => { + (taskId: string, log: string, projectId?: string) => { + // Filter by project to prevent multi-project interference (issue #723) + if (!isTaskForCurrentProject(projectId)) return; // Logs are now batched to reduce state updates (was causing 100+ updates/sec) queueUpdate(taskId, { logs: [log] }); } ); const cleanupStatus = window.electronAPI.onTaskStatusChange( - (taskId: string, status: TaskStatus) => { + (taskId: string, status: TaskStatus, projectId?: string) => { + // Filter by project to prevent multi-project interference + if (!isTaskForCurrentProject(projectId)) return; queueUpdate(taskId, { status }); } ); const cleanupExecutionProgress = window.electronAPI.onTaskExecutionProgress( - (taskId: string, progress: ExecutionProgress) => { + (taskId: string, progress: ExecutionProgress, projectId?: string) => { + // Filter by project to prevent multi-project interference + // This is the critical fix for issue #723 - without this check, + // execution progress from Project A's task could update Project B's UI + if (!isTaskForCurrentProject(projectId)) return; queueUpdate(taskId, { progress }); } ); diff --git a/apps/frontend/src/renderer/index.html b/apps/frontend/src/renderer/index.html index 4d4d0550ee..8e5fc5ff07 100644 --- a/apps/frontend/src/renderer/index.html +++ b/apps/frontend/src/renderer/index.html @@ -3,7 +3,7 @@ - + diff --git a/apps/frontend/src/renderer/lib/browser-mock.ts b/apps/frontend/src/renderer/lib/browser-mock.ts index 917a84b25d..9c884b0627 100644 --- a/apps/frontend/src/renderer/lib/browser-mock.ts +++ b/apps/frontend/src/renderer/lib/browser-mock.ts @@ -110,6 +110,57 @@ const browserMockAPI: ElectronAPI = { // Infrastructure & Docker Operations ...infrastructureMock, + // API Profile Management (custom Anthropic-compatible endpoints) + getAPIProfiles: async () => ({ + success: true, + data: { + profiles: [], + activeProfileId: null, + version: 1 + } + }), + + saveAPIProfile: async (profile) => ({ + success: true, + data: { + id: `mock-profile-${Date.now()}`, + ...profile, + createdAt: Date.now(), + updatedAt: Date.now() + } + }), + + 
updateAPIProfile: async (profile) => ({ + success: true, + data: { + ...profile, + updatedAt: Date.now() + } + }), + + deleteAPIProfile: async (_profileId: string) => ({ + success: true + }), + + setActiveAPIProfile: async (_profileId: string | null) => ({ + success: true + }), + + testConnection: async (_baseUrl: string, _apiKey: string, _signal?: AbortSignal) => ({ + success: true, + data: { + success: true, + message: 'Connection successful (mock)' + } + }), + + discoverModels: async (_baseUrl: string, _apiKey: string, _signal?: AbortSignal) => ({ + success: true, + data: { + models: [] + } + }), + // GitHub API github: { getGitHubRepositories: async () => ({ success: true, data: [] }), @@ -146,6 +197,7 @@ const browserMockAPI: ElectronAPI = { onAutoFixComplete: () => () => {}, onAutoFixError: () => () => {}, listPRs: async () => [], + getPR: async () => null, runPRReview: () => {}, cancelPRReview: async () => true, postPRReview: async () => true, @@ -153,10 +205,14 @@ const browserMockAPI: ElectronAPI = { mergePR: async () => true, assignPR: async () => true, getPRReview: async () => null, + getPRReviewsBatch: async () => ({}), deletePRReview: async () => true, checkNewCommits: async () => ({ hasNewCommits: false, newCommitCount: 0 }), + checkMergeReadiness: async () => ({ isDraft: false, mergeable: 'UNKNOWN' as const, ciStatus: 'none' as const, blockers: [] }), runFollowupReview: () => {}, getPRLogs: async () => null, + getWorkflowsAwaitingApproval: async () => ({ awaiting_approval: 0, workflow_runs: [], can_approve: false }), + approveWorkflow: async () => true, onPRReviewProgress: () => () => {}, onPRReviewComplete: () => () => {}, onPRReviewError: () => () => {}, @@ -195,6 +251,20 @@ const browserMockAPI: ElectronAPI = { data: { command: 'npm install -g @anthropic-ai/claude-code' } }), + // Terminal Worktree Operations + createTerminalWorktree: async () => ({ + success: false, + error: 'Not available in browser mode' + }), + listTerminalWorktrees: async () => ({ + success: true, + data: [] + }), + removeTerminalWorktree: async () => ({ + success: false, + error: 'Not available in browser mode' + }), + // MCP Server Health Check Operations checkMcpHealth: async (server) => ({ success: true, diff --git a/apps/frontend/src/renderer/lib/mocks/infrastructure-mock.ts b/apps/frontend/src/renderer/lib/mocks/infrastructure-mock.ts index 057daf941a..81168fa011 100644 --- a/apps/frontend/src/renderer/lib/mocks/infrastructure-mock.ts +++ b/apps/frontend/src/renderer/lib/mocks/infrastructure-mock.ts @@ -174,28 +174,6 @@ export const infrastructureMock = { onIdeationTypeComplete: () => () => {}, onIdeationTypeFailed: () => () => {}, - // Auto-Build Source Update Operations - checkAutoBuildSourceUpdate: async () => ({ - success: true, - data: { - updateAvailable: true, - currentVersion: '1.0.0', - latestVersion: '1.1.0', - releaseNotes: '## v1.1.0\n\n- New feature: Enhanced spec creation\n- Bug fix: Improved error handling\n- Performance improvements' - } - }), - - downloadAutoBuildSourceUpdate: () => { - console.warn('[Browser Mock] downloadAutoBuildSourceUpdate called'); - }, - - getAutoBuildSourceVersion: async () => ({ - success: true, - data: '1.0.0' - }), - - onAutoBuildSourceUpdateProgress: () => () => {}, - // Shell Operations openExternal: async (url: string) => { console.warn('[Browser Mock] openExternal:', url); diff --git a/apps/frontend/src/renderer/lib/mocks/settings-mock.ts b/apps/frontend/src/renderer/lib/mocks/settings-mock.ts index 559dc12c01..97c833134f 100644 --- 
a/apps/frontend/src/renderer/lib/mocks/settings-mock.ts +++ b/apps/frontend/src/renderer/lib/mocks/settings-mock.ts @@ -13,6 +13,13 @@ export const settingsMock = { saveSettings: async () => ({ success: true }), + // Sentry error reporting + notifySentryStateChanged: (_enabled: boolean) => { + console.warn('[browser-mock] notifySentryStateChanged called'); + }, + getSentryDsn: async () => '', // No DSN in browser mode + getSentryConfig: async () => ({ dsn: '', tracesSampleRate: 0, profilesSampleRate: 0 }), + getCliToolsInfo: async () => ({ success: true, data: { @@ -29,10 +36,12 @@ export const settingsMock = { // App Update Operations (mock - no updates in browser mode) checkAppUpdate: async () => ({ success: true, data: null }), downloadAppUpdate: async () => ({ success: true }), + downloadStableUpdate: async () => ({ success: true }), installAppUpdate: () => { console.warn('[browser-mock] installAppUpdate called'); }, // App Update Event Listeners (no-op in browser mode) onAppUpdateAvailable: () => () => {}, onAppUpdateDownloaded: () => () => {}, - onAppUpdateProgress: () => () => {} + onAppUpdateProgress: () => () => {}, + onAppUpdateStableDowngrade: () => () => {} }; diff --git a/apps/frontend/src/renderer/lib/mocks/terminal-mock.ts b/apps/frontend/src/renderer/lib/mocks/terminal-mock.ts index c74943e5d0..4a8061657b 100644 --- a/apps/frontend/src/renderer/lib/mocks/terminal-mock.ts +++ b/apps/frontend/src/renderer/lib/mocks/terminal-mock.ts @@ -30,6 +30,14 @@ export const terminalMock = { data: 'Mock Terminal' }), + setTerminalTitle: () => { + console.warn('[Browser Mock] setTerminalTitle called'); + }, + + setTerminalWorktreeConfig: () => { + console.warn('[Browser Mock] setTerminalWorktreeConfig called'); + }, + // Terminal session management getTerminalSessions: async () => ({ success: true, @@ -50,6 +58,10 @@ export const terminalMock = { console.warn('[Browser Mock] resumeClaudeInTerminal called'); }, + activateDeferredClaudeResume: () => { + console.warn('[Browser Mock] activateDeferredClaudeResume called'); + }, + getTerminalSessionDates: async () => ({ success: true, data: [] @@ -82,5 +94,8 @@ export const terminalMock = { onTerminalTitleChange: () => () => {}, onTerminalClaudeSession: () => () => {}, onTerminalRateLimit: () => () => {}, - onTerminalOAuthToken: () => () => {} + onTerminalOAuthToken: () => () => {}, + onTerminalAuthCreated: () => () => {}, + onTerminalClaudeBusy: () => () => {}, + onTerminalPendingResume: () => () => {} }; diff --git a/apps/frontend/src/renderer/lib/profile-utils.ts b/apps/frontend/src/renderer/lib/profile-utils.ts new file mode 100644 index 0000000000..985a7a4951 --- /dev/null +++ b/apps/frontend/src/renderer/lib/profile-utils.ts @@ -0,0 +1,49 @@ +/** + * Profile Utility Functions + * + * Helper functions for API profile management in the renderer process. 
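For quick reference, the expected behaviour of the helpers defined below (values are illustrative):

    maskApiKey('sk-ant-api03-abcd1234');          // '••••1234'
    maskApiKey('abc');                            // '••••' (too short to reveal anything)
    isValidUrl('https://api.z.ai/api/anthropic'); // true
    isValidUrl('ftp://example.com');              // false (only http/https pass)
    isValidApiKey('sk-or-v1-1234567890ab');       // true
    isValidApiKey('short');                       // false (minimum length is 12)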
+ */ + +/** + * Mask API key for display - shows only last 4 characters + * Example: sk-ant-test-key-1234 -> โ€ขโ€ขโ€ขโ€ข1234 + */ +export function maskApiKey(key: string): string { + if (!key || key.length <= 4) { + return 'โ€ขโ€ขโ€ขโ€ข'; + } + return `โ€ขโ€ขโ€ขโ€ข${key.slice(-4)}`; +} + +/** + * Validate if a string is a valid URL format + */ +export function isValidUrl(url: string): boolean { + if (!url || url.trim() === '') { + return false; + } + + try { + const urlObj = new URL(url); + return urlObj.protocol === 'http:' || urlObj.protocol === 'https:'; + } catch { + return false; + } +} + +/** + * Validate if a string looks like a valid API key + * (basic length and character check) + */ +export function isValidApiKey(key: string): boolean { + if (!key || key.trim() === '') { + return false; + } + + const trimmed = key.trim(); + if (trimmed.length < 12) { + return false; + } + + return /^[a-zA-Z0-9\-_+.]+$/.test(trimmed); +} diff --git a/apps/frontend/src/renderer/lib/sentry.ts b/apps/frontend/src/renderer/lib/sentry.ts new file mode 100644 index 0000000000..f5ca8722d1 --- /dev/null +++ b/apps/frontend/src/renderer/lib/sentry.ts @@ -0,0 +1,163 @@ +/** + * Sentry Error Tracking for Renderer Process + * + * Initializes Sentry with: + * - beforeSend hook that checks settings store (allows mid-session toggle) + * - Path masking for user privacy (shared with main process) + * - Function to notify main process when setting changes + * + * Privacy Note: + * - Usernames are masked from all file paths + * - Project paths remain visible for debugging (this is expected) + * - Tags, contexts, extra data, and user info are all sanitized + * + * DSN Configuration: + * - DSN is loaded from environment variable via main process IPC + * - If no DSN is configured, Sentry is disabled (safe for forks) + * + * Race Condition Prevention: + * - We track whether settings have been loaded from disk + * - Until settings are loaded, we default to NOT sending events + * - This respects user preference even during early app initialization + */ + +import * as Sentry from '@sentry/electron/renderer'; +import { useSettingsStore } from '../stores/settings-store'; +import { + processEvent, + type SentryErrorEvent +} from '../../shared/utils/sentry-privacy'; + +// Track whether settings have been loaded from disk +// This prevents sending events before we know user's preference +let settingsLoaded = false; + +// Track whether Sentry has been initialized +let sentryInitialized = false; + +/** + * Mark settings as loaded + * Called by settings store after initial load from disk + */ +export function markSettingsLoaded(): void { + settingsLoaded = true; + console.log('[Sentry] Settings loaded, error reporting ready'); +} + +/** + * Check if settings have been loaded + */ +export function areSettingsLoaded(): boolean { + return settingsLoaded; +} + +/** + * Initialize Sentry for renderer process + * Should be called early in renderer startup + * + * This is async because we need to fetch the DSN from the main process + */ +export async function initSentryRenderer(): Promise { + // Check if we're in Electron or browser environment + const isElectron = typeof window !== 'undefined' && !!window.electronAPI; + + if (!isElectron) { + console.log('[Sentry] Not in Electron environment, skipping initialization'); + return; + } + + // Get full Sentry config from main process (DSN + sample rates from env vars) + let config = { dsn: '', tracesSampleRate: 0, profilesSampleRate: 0 }; + try { + config = await 
window.electronAPI.getSentryConfig(); + } catch (error) { + console.warn('[Sentry] Failed to get config from main process:', error); + } + + const hasDsn = config.dsn.length > 0; + if (!hasDsn) { + console.log('[Sentry] No DSN configured - error reporting disabled in renderer'); + return; + } + + Sentry.init({ + dsn: config.dsn, + + beforeSend(event: Sentry.ErrorEvent) { + // Don't send events until settings are loaded + // This prevents sending events if user had disabled Sentry + if (!settingsLoaded) { + console.log('[Sentry] Settings not loaded yet, dropping event'); + return null; + } + + // Check current setting at send time (allows mid-session toggle) + try { + const currentSettings = useSettingsStore.getState().settings; + const isEnabled = currentSettings.sentryEnabled ?? true; + + if (!isEnabled) { + return null; + } + } catch (error) { + // If settings store fails, don't send event (be conservative) + console.error('[Sentry] Failed to read settings, dropping event:', error); + return null; + } + + // Process event with shared privacy utility + return processEvent(event as SentryErrorEvent) as Sentry.ErrorEvent; + }, + + // Sample rates from main process (configured via environment variables) + tracesSampleRate: config.tracesSampleRate, + profilesSampleRate: config.profilesSampleRate, + + // Enable in Electron environment when we have a DSN + enabled: true, + }); + + sentryInitialized = true; + console.log(`[Sentry] Renderer initialized (traces: ${config.tracesSampleRate}, profiles: ${config.profilesSampleRate})`); +} + +/** + * Check if Sentry has been initialized + */ +export function isSentryInitialized(): boolean { + return sentryInitialized; +} + +/** + * Notify main process when Sentry setting changes + * Call this whenever the user toggles the setting in the UI + */ +export function notifySentryStateChanged(enabled: boolean): void { + console.log(`[Sentry] Notifying main process: ${enabled ? 
'enabled' : 'disabled'}`); + try { + window.electronAPI?.notifySentryStateChanged?.(enabled); + } catch (error) { + console.error('[Sentry] Failed to notify main process:', error); + } +} + +/** + * Manually capture an exception with Sentry + * Useful for error boundaries or try/catch blocks + */ +export function captureException(error: Error, context?: Record): void { + if (!sentryInitialized) { + // Sentry not initialized (no DSN configured), just log + console.error('[Sentry] Not initialized, error not captured:', error); + return; + } + + if (context) { + Sentry.withScope((scope) => { + scope.setContext('additional', context); + Sentry.captureException(error); + }); + } else { + Sentry.captureException(error); + } +} diff --git a/apps/frontend/src/renderer/lib/utils.ts b/apps/frontend/src/renderer/lib/utils.ts index dfed71522a..2799994fe9 100644 --- a/apps/frontend/src/renderer/lib/utils.ts +++ b/apps/frontend/src/renderer/lib/utils.ts @@ -79,8 +79,8 @@ export function sanitizeMarkdownForDisplay(text: string, maxLength: number = 200 .replace(/\s+/g, ' ') .trim(); - // Truncate if needed - if (sanitized.length > maxLength) { + // Truncate if needed (0 means no truncation) + if (maxLength > 0 && sanitized.length > maxLength) { sanitized = sanitized.substring(0, maxLength).trim() + '...'; } diff --git a/apps/frontend/src/renderer/main.tsx b/apps/frontend/src/renderer/main.tsx index cad7aca3c4..56339747aa 100644 --- a/apps/frontend/src/renderer/main.tsx +++ b/apps/frontend/src/renderer/main.tsx @@ -4,6 +4,13 @@ import './lib/browser-mock'; // Initialize i18n before React import '../shared/i18n'; +// Initialize Sentry for error tracking (respects user's sentryEnabled setting) +// Fire-and-forget: React rendering proceeds immediately while Sentry initializes async +import { initSentryRenderer } from './lib/sentry'; +initSentryRenderer().catch((err) => { + console.warn('[Sentry] Failed to initialize renderer:', err); +}); + import React from 'react'; import ReactDOM from 'react-dom/client'; import { App } from './App'; diff --git a/apps/frontend/src/renderer/stores/settings-store.ts b/apps/frontend/src/renderer/stores/settings-store.ts index 41ed161bed..71c21f0fe1 100644 --- a/apps/frontend/src/renderer/stores/settings-store.ts +++ b/apps/frontend/src/renderer/stores/settings-store.ts @@ -1,17 +1,46 @@ import { create } from 'zustand'; import type { AppSettings } from '../../shared/types'; +import type { APIProfile, ProfileFormData, TestConnectionResult, DiscoverModelsResult, ModelInfo } from '@shared/types/profile'; import { DEFAULT_APP_SETTINGS } from '../../shared/constants'; +import { toast } from '../hooks/use-toast'; +import { markSettingsLoaded } from '../lib/sentry'; interface SettingsState { settings: AppSettings; isLoading: boolean; error: string | null; + // API Profile state + profiles: APIProfile[]; + activeProfileId: string | null; + profilesLoading: boolean; + profilesError: string | null; + + // Test connection state + isTestingConnection: boolean; + testConnectionResult: TestConnectionResult | null; + + // Model discovery state + modelsLoading: boolean; + modelsError: string | null; + discoveredModels: Map; // Cache key -> models mapping + // Actions setSettings: (settings: AppSettings) => void; updateSettings: (updates: Partial) => void; setLoading: (loading: boolean) => void; setError: (error: string | null) => void; + + // Profile actions + setProfiles: (profiles: APIProfile[], activeProfileId: string | null) => void; + setProfilesLoading: (loading: boolean) => void; + 
setProfilesError: (error: string | null) => void; + saveProfile: (profile: ProfileFormData) => Promise; + updateProfile: (profile: APIProfile) => Promise; + deleteProfile: (profileId: string) => Promise; + setActiveProfile: (profileId: string | null) => Promise; + testConnection: (baseUrl: string, apiKey: string, signal?: AbortSignal) => Promise; + discoverModels: (baseUrl: string, apiKey: string, signal?: AbortSignal) => Promise; } export const useSettingsStore = create((set) => ({ @@ -19,6 +48,21 @@ export const useSettingsStore = create((set) => ({ isLoading: true, // Start as true since we load settings on app init error: null, + // API Profile state + profiles: [], + activeProfileId: null, + profilesLoading: false, + profilesError: null, + + // Test connection state + isTestingConnection: false, + testConnectionResult: null, + + // Model discovery state + modelsLoading: false, + modelsError: null, + discoveredModels: new Map(), + setSettings: (settings) => set({ settings }), updateSettings: (updates) => @@ -28,7 +72,227 @@ export const useSettingsStore = create((set) => ({ setLoading: (isLoading) => set({ isLoading }), - setError: (error) => set({ error }) + setError: (error) => set({ error }), + + // Profile actions + setProfiles: (profiles, activeProfileId) => set({ profiles, activeProfileId }), + + setProfilesLoading: (profilesLoading) => set({ profilesLoading }), + + setProfilesError: (profilesError) => set({ profilesError }), + + saveProfile: async (profile: ProfileFormData): Promise => { + set({ profilesLoading: true, profilesError: null }); + try { + const result = await window.electronAPI.saveAPIProfile(profile); + if (result.success && result.data) { + // Re-fetch profiles from backend to get authoritative activeProfileId + // (backend only auto-activates the first profile) + try { + const profilesResult = await window.electronAPI.getAPIProfiles(); + if (profilesResult.success && profilesResult.data) { + set({ + profiles: profilesResult.data.profiles, + activeProfileId: profilesResult.data.activeProfileId, + profilesLoading: false + }); + } else { + // Fallback: add profile locally but don't assume activeProfileId + set((state) => ({ + profiles: [...state.profiles, result.data!], + profilesLoading: false + })); + } + } catch { + // Fallback on fetch error: add profile locally + set((state) => ({ + profiles: [...state.profiles, result.data!], + profilesLoading: false + })); + } + return true; + } + set({ + profilesError: result.error || 'Failed to save profile', + profilesLoading: false + }); + return false; + } catch (error) { + set({ + profilesError: error instanceof Error ? error.message : 'Failed to save profile', + profilesLoading: false + }); + return false; + } + }, + + updateProfile: async (profile: APIProfile): Promise => { + set({ profilesLoading: true, profilesError: null }); + try { + const result = await window.electronAPI.updateAPIProfile(profile); + if (result.success && result.data) { + set((state) => ({ + profiles: state.profiles.map((p) => + p.id === result.data!.id ? result.data! : p + ), + profilesLoading: false + })); + return true; + } + set({ + profilesError: result.error || 'Failed to update profile', + profilesLoading: false + }); + return false; + } catch (error) { + set({ + profilesError: error instanceof Error ? 
error.message : 'Failed to update profile', + profilesLoading: false + }); + return false; + } + }, + + deleteProfile: async (profileId: string): Promise => { + set({ profilesLoading: true, profilesError: null }); + try { + const result = await window.electronAPI.deleteAPIProfile(profileId); + if (result.success) { + set((state) => ({ + profiles: state.profiles.filter((p) => p.id !== profileId), + activeProfileId: state.activeProfileId === profileId ? null : state.activeProfileId, + profilesLoading: false + })); + return true; + } + set({ + profilesError: result.error || 'Failed to delete profile', + profilesLoading: false + }); + return false; + } catch (error) { + set({ + profilesError: error instanceof Error ? error.message : 'Failed to delete profile', + profilesLoading: false + }); + return false; + } + }, + + setActiveProfile: async (profileId: string | null): Promise => { + set({ profilesLoading: true, profilesError: null }); + try { + const result = await window.electronAPI.setActiveAPIProfile(profileId); + if (result.success) { + set({ activeProfileId: profileId, profilesLoading: false }); + return true; + } + set({ + profilesError: result.error || 'Failed to set active profile', + profilesLoading: false + }); + return false; + } catch (error) { + set({ + profilesError: error instanceof Error ? error.message : 'Failed to set active profile', + profilesLoading: false + }); + return false; + } + }, + + testConnection: async (baseUrl: string, apiKey: string, signal?: AbortSignal): Promise => { + set({ isTestingConnection: true, testConnectionResult: null }); + try { + const result = await window.electronAPI.testConnection(baseUrl, apiKey, signal); + + // Type narrowing pattern + if (result.success && result.data) { + set({ testConnectionResult: result.data, isTestingConnection: false }); + + // Show toast on success + // TODO: Use i18n translation keys (settings:connection.successTitle, settings:connection.successDescription) + // Note: Zustand stores can't use useTranslation() hook - need to pass t() or use i18n.t() + if (result.data.success) { + toast({ + title: 'Connection successful', + description: 'Your API credentials are valid.' + }); + } + return result.data; + } + + // Error from IPC layer - set testConnectionResult for inline display + const errorResult: TestConnectionResult = { + success: false, + errorType: 'unknown', + message: result.error || 'Failed to test connection' + }; + set({ testConnectionResult: errorResult, isTestingConnection: false }); + toast({ + variant: 'destructive', + title: 'Connection test failed', + description: result.error || 'Failed to test connection' + }); + return errorResult; + } catch (error) { + // Unexpected error - set testConnectionResult for inline display + const errorResult: TestConnectionResult = { + success: false, + errorType: 'unknown', + message: error instanceof Error ? error.message : 'Failed to test connection' + }; + set({ testConnectionResult: errorResult, isTestingConnection: false }); + toast({ + variant: 'destructive', + title: 'Connection test failed', + description: error instanceof Error ? 
error.message : 'Failed to test connection' + }); + return errorResult; + } + }, + + discoverModels: async (baseUrl: string, apiKey: string, signal?: AbortSignal): Promise => { + console.log('[settings-store] discoverModels called with:', { baseUrl, apiKey: `${apiKey.slice(-4)}` }); + // Generate cache key from baseUrl and apiKey (last 4 chars) + const cacheKey = `${baseUrl}::${apiKey.slice(-4)}`; + + // Check cache first + const state = useSettingsStore.getState(); + const cached = state.discoveredModels.get(cacheKey); + if (cached) { + console.log('[settings-store] Returning cached models'); + return cached; + } + + // Fetch from API + set({ modelsLoading: true, modelsError: null }); + try { + console.log('[settings-store] Calling window.electronAPI.discoverModels...'); + const result = await window.electronAPI.discoverModels(baseUrl, apiKey, signal); + console.log('[settings-store] discoverModels result:', result); + + if (result.success && result.data) { + const models = result.data.models; + // Cache the results + set((state) => ({ + discoveredModels: new Map(state.discoveredModels).set(cacheKey, models), + modelsLoading: false + })); + return models; + } + + // Error from IPC layer + set({ modelsError: result.error || 'Failed to discover models', modelsLoading: false }); + return null; + } catch (error) { + set({ + modelsError: error instanceof Error ? error.message : 'Failed to discover models', + modelsLoading: false + }); + return null; + } + } })); /** @@ -79,9 +343,18 @@ export async function loadSettings(): Promise { onboardingCompleted: migratedSettings.onboardingCompleted }); } + + // Only mark settings as loaded on SUCCESS + // This ensures Sentry respects user's opt-out preference even if settings fail to load + // (If settings fail to load, Sentry's beforeSend drops all events until successful load) + markSettingsLoaded(); } + // Note: If result.success is false, we intentionally do NOT mark settings as loaded. + // This means Sentry will drop events, which is the safe default for privacy. } catch (error) { store.setError(error instanceof Error ? error.message : 'Failed to load settings'); + // Note: On exception, we intentionally do NOT mark settings as loaded. + // Sentry's beforeSend will drop events, respecting potential user opt-out. } finally { store.setLoading(false); } @@ -104,3 +377,22 @@ export async function saveSettings(updates: Partial): Promise { + const store = useSettingsStore.getState(); + store.setProfilesLoading(true); + + try { + const result = await window.electronAPI.getAPIProfiles(); + if (result.success && result.data) { + store.setProfiles(result.data.profiles, result.data.activeProfileId); + } + } catch (error) { + store.setProfilesError(error instanceof Error ? 
error.message : 'Failed to load profiles'); + } finally { + store.setProfilesLoading(false); + } +} diff --git a/apps/frontend/src/renderer/stores/terminal-store.ts b/apps/frontend/src/renderer/stores/terminal-store.ts index bb904bc5ac..4975603cd0 100644 --- a/apps/frontend/src/renderer/stores/terminal-store.ts +++ b/apps/frontend/src/renderer/stores/terminal-store.ts @@ -1,6 +1,7 @@ import { create } from 'zustand'; import { v4 as uuid } from 'uuid'; -import type { TerminalSession } from '../../shared/types'; +import { arrayMove } from '@dnd-kit/sortable'; +import type { TerminalSession, TerminalWorktreeConfig } from '../../shared/types'; import { terminalBufferManager } from '../lib/terminal-buffer-manager'; import { debugLog, debugError } from '../../shared/utils/debug-logger'; @@ -18,6 +19,9 @@ export interface Terminal { isRestored?: boolean; // Whether this terminal was restored from a saved session associatedTaskId?: string; // ID of task associated with this terminal (for context loading) projectPath?: string; // Project this terminal belongs to (for multi-project support) + worktreeConfig?: TerminalWorktreeConfig; // Associated worktree for isolated development + isClaudeBusy?: boolean; // Whether Claude Code is actively processing (for visual indicator) + pendingClaudeResume?: boolean; // Whether this terminal has a pending Claude resume (deferred until tab activated) } interface TerminalLayout { @@ -38,6 +42,8 @@ interface TerminalState { // Actions addTerminal: (cwd?: string, projectPath?: string) => Terminal | null; addRestoredTerminal: (session: TerminalSession) => Terminal; + // Add a terminal with a specific ID (for terminals created in main process, like OAuth login terminals) + addExternalTerminal: (id: string, title: string, cwd?: string, projectPath?: string) => Terminal | null; removeTerminal: (id: string) => void; updateTerminal: (id: string, updates: Partial) => void; setActiveTerminal: (id: string | null) => void; @@ -45,14 +51,19 @@ interface TerminalState { setClaudeMode: (id: string, isClaudeMode: boolean) => void; setClaudeSessionId: (id: string, sessionId: string) => void; setAssociatedTask: (id: string, taskId: string | undefined) => void; + setWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined) => void; + setClaudeBusy: (id: string, isBusy: boolean) => void; + setPendingClaudeResume: (id: string, pending: boolean) => void; clearAllTerminals: () => void; setHasRestoredSessions: (value: boolean) => void; + reorderTerminals: (activeId: string, overId: string) => void; // Selectors getTerminal: (id: string) => Terminal | undefined; getActiveTerminal: () => Terminal | undefined; - canAddTerminal: () => boolean; + canAddTerminal: (projectPath?: string) => boolean; getTerminalsForProject: (projectPath: string) => Terminal[]; + getWorktreeCount: () => number; } export const useTerminalStore = create((set, get) => ({ @@ -102,11 +113,15 @@ export const useTerminalStore = create((set, get) => ({ status: 'idle', // Will be updated to 'running' when PTY is created cwd: session.cwd, createdAt: new Date(session.createdAt), - isClaudeMode: session.isClaudeMode, + // Reset Claude mode to false - Claude Code is killed on app restart + // Keep claudeSessionId so users can resume by clicking the invoke button + isClaudeMode: false, claudeSessionId: session.claudeSessionId, // outputBuffer now stored in terminalBufferManager isRestored: true, projectPath: session.projectPath, + // Worktree config is validated in main process before restore + worktreeConfig: 
session.worktreeConfig, }; // Restore buffer to buffer manager @@ -122,6 +137,42 @@ export const useTerminalStore = create((set, get) => ({ return restoredTerminal; }, + addExternalTerminal: (id: string, title: string, cwd?: string, projectPath?: string) => { + const state = get(); + + // Check if terminal with this ID already exists + const existingTerminal = state.terminals.find(t => t.id === id); + if (existingTerminal) { + // Just activate it and return it + set({ activeTerminalId: id }); + return existingTerminal; + } + + // Use the same logic as canAddTerminal - count only non-exited terminals + // This ensures consistency and doesn't block new terminals when only exited ones exist + const activeTerminalCount = state.terminals.filter(t => t.status !== 'exited').length; + if (activeTerminalCount >= state.maxTerminals) { + return null; + } + + const newTerminal: Terminal = { + id, + title, + status: 'running', // External terminals are already running + cwd: cwd || process.env.HOME || '~', + createdAt: new Date(), + isClaudeMode: false, + projectPath, + }; + + set((state) => ({ + terminals: [...state.terminals, newTerminal], + activeTerminalId: newTerminal.id, + })); + + return newTerminal; + }, + removeTerminal: (id: string) => { // Clean up buffer manager terminalBufferManager.dispose(id); @@ -163,7 +214,13 @@ export const useTerminalStore = create((set, get) => ({ set((state) => ({ terminals: state.terminals.map((t) => t.id === id - ? { ...t, isClaudeMode, status: isClaudeMode ? 'claude-active' : 'running' } + ? { + ...t, + isClaudeMode, + status: isClaudeMode ? 'claude-active' : 'running', + // Reset busy state when leaving Claude mode + isClaudeBusy: isClaudeMode ? t.isClaudeBusy : undefined + } : t ), })); @@ -185,6 +242,30 @@ export const useTerminalStore = create((set, get) => ({ })); }, + setWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined) => { + set((state) => ({ + terminals: state.terminals.map((t) => + t.id === id ? { ...t, worktreeConfig: config } : t + ), + })); + }, + + setClaudeBusy: (id: string, isBusy: boolean) => { + set((state) => ({ + terminals: state.terminals.map((t) => + t.id === id ? { ...t, isClaudeBusy: isBusy } : t + ), + })); + }, + + setPendingClaudeResume: (id: string, pending: boolean) => { + set((state) => ({ + terminals: state.terminals.map((t) => + t.id === id ? 
{ ...t, pendingClaudeResume: pending } : t + ), + })); + }, + clearAllTerminals: () => { set({ terminals: [], activeTerminalId: null, hasRestoredSessions: false }); }, @@ -193,6 +274,21 @@ export const useTerminalStore = create((set, get) => ({ set({ hasRestoredSessions: value }); }, + reorderTerminals: (activeId: string, overId: string) => { + set((state) => { + const oldIndex = state.terminals.findIndex((t) => t.id === activeId); + const newIndex = state.terminals.findIndex((t) => t.id === overId); + + if (oldIndex === -1 || newIndex === -1) { + return state; + } + + return { + terminals: arrayMove(state.terminals, oldIndex, newIndex), + }; + }); + }, + getTerminal: (id: string) => { return get().terminals.find((t) => t.id === id); }, @@ -202,14 +298,28 @@ export const useTerminalStore = create((set, get) => ({ return state.terminals.find((t) => t.id === state.activeTerminalId); }, - canAddTerminal: () => { + canAddTerminal: (projectPath?: string) => { const state = get(); - return state.terminals.length < state.maxTerminals; + // Count only non-exited terminals, optionally filtered by project + const activeTerminals = state.terminals.filter(t => { + // Exclude exited terminals from the count + if (t.status === 'exited') return false; + // If projectPath specified, only count terminals for that project (or legacy without projectPath) + if (projectPath) { + return t.projectPath === projectPath || !t.projectPath; + } + return true; + }); + return activeTerminals.length < state.maxTerminals; }, getTerminalsForProject: (projectPath: string) => { return get().terminals.filter(t => t.projectPath === projectPath); }, + + getWorktreeCount: () => { + return get().terminals.filter(t => t.worktreeConfig).length; + }, })); // Track in-progress restore operations to prevent race conditions diff --git a/apps/frontend/src/shared/constants/api-profiles.ts b/apps/frontend/src/shared/constants/api-profiles.ts new file mode 100644 index 0000000000..99c72138ce --- /dev/null +++ b/apps/frontend/src/shared/constants/api-profiles.ts @@ -0,0 +1,33 @@ +export type ApiProviderPreset = { + id: string; + baseUrl: string; + labelKey: string; +}; + +export const API_PROVIDER_PRESETS: readonly ApiProviderPreset[] = [ + { + id: 'anthropic', + baseUrl: 'https://api.anthropic.com', + labelKey: 'settings:apiProfiles.presets.anthropic' + }, + { + id: 'openrouter', + baseUrl: 'https://openrouter.ai/api/v1', + labelKey: 'settings:apiProfiles.presets.openrouter' + }, + { + id: 'groq', + baseUrl: 'https://api.groq.com/openai/v1', + labelKey: 'settings:apiProfiles.presets.groq' + }, + { + id: 'glm-global', + baseUrl: 'https://api.z.ai/api/anthropic', + labelKey: 'settings:apiProfiles.presets.glmGlobal' + }, + { + id: 'glm-cn', + baseUrl: 'https://open.bigmodel.cn/api/paas/v4', + labelKey: 'settings:apiProfiles.presets.glmChina' + } +]; diff --git a/apps/frontend/src/shared/constants/config.ts b/apps/frontend/src/shared/constants/config.ts index 093c77863a..daa0954e48 100644 --- a/apps/frontend/src/shared/constants/config.ts +++ b/apps/frontend/src/shared/constants/config.ts @@ -48,7 +48,9 @@ export const DEFAULT_APP_SETTINGS = { // Beta updates opt-in (receive pre-release versions) betaUpdates: false, // Language preference (default to English) - language: 'en' as const + language: 'en' as const, + // Anonymous error reporting (Sentry) - enabled by default to help improve the app + sentryEnabled: true }; // ============================================ diff --git a/apps/frontend/src/shared/constants/index.ts 
b/apps/frontend/src/shared/constants/index.ts index ea90dce632..5b3f49872f 100644 --- a/apps/frontend/src/shared/constants/index.ts +++ b/apps/frontend/src/shared/constants/index.ts @@ -30,5 +30,8 @@ export * from './themes'; // GitHub integration constants export * from './github'; +// API profile presets +export * from './api-profiles'; + // Configuration and paths export * from './config'; diff --git a/apps/frontend/src/shared/constants/ipc.ts b/apps/frontend/src/shared/constants/ipc.ts index 5169f934a9..078f63a1ed 100644 --- a/apps/frontend/src/shared/constants/ipc.ts +++ b/apps/frontend/src/shared/constants/ipc.ts @@ -63,24 +63,35 @@ export const IPC_CHANNELS = { TERMINAL_RESIZE: 'terminal:resize', TERMINAL_INVOKE_CLAUDE: 'terminal:invokeClaude', TERMINAL_GENERATE_NAME: 'terminal:generateName', + TERMINAL_SET_TITLE: 'terminal:setTitle', // Renderer -> Main: user renamed terminal + TERMINAL_SET_WORKTREE_CONFIG: 'terminal:setWorktreeConfig', // Renderer -> Main: worktree association changed // Terminal session management TERMINAL_GET_SESSIONS: 'terminal:getSessions', TERMINAL_RESTORE_SESSION: 'terminal:restoreSession', TERMINAL_CLEAR_SESSIONS: 'terminal:clearSessions', TERMINAL_RESUME_CLAUDE: 'terminal:resumeClaude', + TERMINAL_ACTIVATE_DEFERRED_RESUME: 'terminal:activateDeferredResume', // Trigger deferred Claude resume when terminal becomes active TERMINAL_GET_SESSION_DATES: 'terminal:getSessionDates', TERMINAL_GET_SESSIONS_FOR_DATE: 'terminal:getSessionsForDate', TERMINAL_RESTORE_FROM_DATE: 'terminal:restoreFromDate', TERMINAL_CHECK_PTY_ALIVE: 'terminal:checkPtyAlive', + // Terminal worktree operations (isolated development in worktrees) + TERMINAL_WORKTREE_CREATE: 'terminal:worktreeCreate', + TERMINAL_WORKTREE_REMOVE: 'terminal:worktreeRemove', + TERMINAL_WORKTREE_LIST: 'terminal:worktreeList', + // Terminal events (main -> renderer) TERMINAL_OUTPUT: 'terminal:output', TERMINAL_EXIT: 'terminal:exit', TERMINAL_TITLE_CHANGE: 'terminal:titleChange', TERMINAL_CLAUDE_SESSION: 'terminal:claudeSession', // Claude session ID captured + TERMINAL_PENDING_RESUME: 'terminal:pendingResume', // Terminal has pending Claude resume (for deferred activation) TERMINAL_RATE_LIMIT: 'terminal:rateLimit', // Claude Code rate limit detected TERMINAL_OAUTH_TOKEN: 'terminal:oauthToken', // OAuth token captured from setup-token output + TERMINAL_AUTH_CREATED: 'terminal:authCreated', // Auth terminal created for OAuth flow + TERMINAL_CLAUDE_BUSY: 'terminal:claudeBusy', // Claude Code busy state (for visual indicator) // Claude profile management (multi-account support) CLAUDE_PROFILES_GET: 'claude:profilesGet', @@ -111,6 +122,17 @@ export const IPC_CHANNELS = { SETTINGS_SAVE: 'settings:save', SETTINGS_GET_CLI_TOOLS_INFO: 'settings:getCliToolsInfo', + // API Profile management (custom Anthropic-compatible endpoints) + PROFILES_GET: 'profiles:get', + PROFILES_SAVE: 'profiles:save', + PROFILES_UPDATE: 'profiles:update', + PROFILES_DELETE: 'profiles:delete', + PROFILES_SET_ACTIVE: 'profiles:setActive', + PROFILES_TEST_CONNECTION: 'profiles:test-connection', + PROFILES_TEST_CONNECTION_CANCEL: 'profiles:test-connection-cancel', + PROFILES_DISCOVER_MODELS: 'profiles:discover-models', + PROFILES_DISCOVER_MODELS_CANCEL: 'profiles:discover-models-cancel', + // Dialogs DIALOG_SELECT_DIRECTORY: 'dialog:selectDirectory', DIALOG_CREATE_PROJECT_FOLDER: 'dialog:createProjectFolder', @@ -337,6 +359,7 @@ export const IPC_CHANNELS = { GITHUB_PR_REVIEW: 'github:pr:review', GITHUB_PR_REVIEW_CANCEL: 'github:pr:reviewCancel', 
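The channels added in this hunk are consumed through the preload bridge; a hedged sketch of how one of them (TERMINAL_WORKTREE_LIST, added above) might be exposed, noting that the real preload file is not part of this excerpt, so the import path and helper name are assumptions:

    import { ipcRenderer } from 'electron';
    import { IPC_CHANNELS } from '../shared/constants/ipc';

    // Example: expose the new worktree listing channel to the renderer.
    const listTerminalWorktrees = () =>
      ipcRenderer.invoke(IPC_CHANNELS.TERMINAL_WORKTREE_LIST);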
GITHUB_PR_GET_REVIEW: 'github:pr:getReview', + GITHUB_PR_GET_REVIEWS_BATCH: 'github:pr:getReviewsBatch', // Batch load reviews for multiple PRs GITHUB_PR_POST_REVIEW: 'github:pr:postReview', GITHUB_PR_DELETE_REVIEW: 'github:pr:deleteReview', GITHUB_PR_MERGE: 'github:pr:merge', @@ -345,6 +368,7 @@ export const IPC_CHANNELS = { GITHUB_PR_FIX: 'github:pr:fix', GITHUB_PR_FOLLOWUP_REVIEW: 'github:pr:followupReview', GITHUB_PR_CHECK_NEW_COMMITS: 'github:pr:checkNewCommits', + GITHUB_PR_CHECK_MERGE_READINESS: 'github:pr:checkMergeReadiness', // GitHub PR Review events (main -> renderer) GITHUB_PR_REVIEW_PROGRESS: 'github:pr:reviewProgress', @@ -354,6 +378,14 @@ export const IPC_CHANNELS = { // GitHub PR Logs (for viewing AI review logs) GITHUB_PR_GET_LOGS: 'github:pr:getLogs', + // GitHub PR Memory operations (saves review insights to memory layer) + GITHUB_PR_MEMORY_GET: 'github:pr:memory:get', // Get PR review memories + GITHUB_PR_MEMORY_SEARCH: 'github:pr:memory:search', // Search PR review memories + + // GitHub Workflow Approval (for fork PRs) + GITHUB_WORKFLOWS_AWAITING_APPROVAL: 'github:workflows:awaitingApproval', + GITHUB_WORKFLOW_APPROVE: 'github:workflow:approve', + // GitHub Issue Triage operations GITHUB_TRIAGE_RUN: 'github:triage:run', GITHUB_TRIAGE_GET_RESULTS: 'github:triage:getResults', @@ -384,12 +416,6 @@ export const IPC_CHANNELS = { OLLAMA_PULL_MODEL: 'ollama:pullModel', OLLAMA_PULL_PROGRESS: 'ollama:pullProgress', - // Auto Claude source updates - AUTOBUILD_SOURCE_CHECK: 'autobuild:source:check', - AUTOBUILD_SOURCE_DOWNLOAD: 'autobuild:source:download', - AUTOBUILD_SOURCE_VERSION: 'autobuild:source:version', - AUTOBUILD_SOURCE_PROGRESS: 'autobuild:source:progress', - // Auto Claude source environment configuration AUTOBUILD_SOURCE_ENV_GET: 'autobuild:source:env:get', AUTOBUILD_SOURCE_ENV_UPDATE: 'autobuild:source:env:update', @@ -447,6 +473,7 @@ export const IPC_CHANNELS = { // App auto-update operations APP_UPDATE_CHECK: 'app-update:check', APP_UPDATE_DOWNLOAD: 'app-update:download', + APP_UPDATE_DOWNLOAD_STABLE: 'app-update:download-stable', // Download stable version (for downgrade from beta) APP_UPDATE_INSTALL: 'app-update:install', APP_UPDATE_GET_VERSION: 'app-update:get-version', @@ -455,6 +482,7 @@ export const IPC_CHANNELS = { APP_UPDATE_DOWNLOADED: 'app-update:downloaded', APP_UPDATE_PROGRESS: 'app-update:progress', APP_UPDATE_ERROR: 'app-update:error', + APP_UPDATE_STABLE_DOWNGRADE: 'app-update:stable-downgrade', // Stable version available for downgrade from beta // Release operations RELEASE_SUGGEST_VERSION: 'release:suggestVersion', @@ -478,5 +506,10 @@ export const IPC_CHANNELS = { // MCP Server health checks MCP_CHECK_HEALTH: 'mcp:checkHealth', // Quick connectivity check - MCP_TEST_CONNECTION: 'mcp:testConnection' // Full MCP protocol test + MCP_TEST_CONNECTION: 'mcp:testConnection', // Full MCP protocol test + + // Sentry error reporting + SENTRY_STATE_CHANGED: 'sentry:state-changed', // Notify main process when setting changes + GET_SENTRY_DSN: 'sentry:get-dsn', // Get DSN from main process (env var) + GET_SENTRY_CONFIG: 'sentry:get-config' // Get full Sentry config (DSN + sample rates) } as const; diff --git a/apps/frontend/src/shared/constants/models.ts b/apps/frontend/src/shared/constants/models.ts index 4c8a31e169..ddff3792f8 100644 --- a/apps/frontend/src/shared/constants/models.ts +++ b/apps/frontend/src/shared/constants/models.ts @@ -45,26 +45,76 @@ export const THINKING_LEVELS = [ ] as const; // ============================================ -// Agent 
Profiles +// Agent Profiles - Phase Configurations // ============================================ -// Default phase model configuration for Auto profile -// Uses Opus across all phases for maximum quality -export const DEFAULT_PHASE_MODELS: PhaseModelConfig = { - spec: 'opus', // Best quality for spec creation - planning: 'opus', // Complex architecture decisions benefit from Opus - coding: 'opus', // Highest quality implementation - qa: 'opus' // Thorough QA review +// Phase configurations for each preset profile +// Each profile has its own default phase models and thinking levels + +// Auto (Optimized) - Opus with optimized thinking per phase +export const AUTO_PHASE_MODELS: PhaseModelConfig = { + spec: 'opus', + planning: 'opus', + coding: 'opus', + qa: 'opus' }; -// Default phase thinking configuration for Auto profile -export const DEFAULT_PHASE_THINKING: import('../types/settings').PhaseThinkingConfig = { +export const AUTO_PHASE_THINKING: import('../types/settings').PhaseThinkingConfig = { spec: 'ultrathink', // Deep thinking for comprehensive spec creation planning: 'high', // High thinking for planning complex features coding: 'low', // Faster coding iterations qa: 'low' // Efficient QA review }; +// Complex Tasks - Opus with ultrathink across all phases +export const COMPLEX_PHASE_MODELS: PhaseModelConfig = { + spec: 'opus', + planning: 'opus', + coding: 'opus', + qa: 'opus' +}; + +export const COMPLEX_PHASE_THINKING: import('../types/settings').PhaseThinkingConfig = { + spec: 'ultrathink', + planning: 'ultrathink', + coding: 'ultrathink', + qa: 'ultrathink' +}; + +// Balanced - Sonnet with medium thinking across all phases +export const BALANCED_PHASE_MODELS: PhaseModelConfig = { + spec: 'sonnet', + planning: 'sonnet', + coding: 'sonnet', + qa: 'sonnet' +}; + +export const BALANCED_PHASE_THINKING: import('../types/settings').PhaseThinkingConfig = { + spec: 'medium', + planning: 'medium', + coding: 'medium', + qa: 'medium' +}; + +// Quick Edits - Haiku with low thinking across all phases +export const QUICK_PHASE_MODELS: PhaseModelConfig = { + spec: 'haiku', + planning: 'haiku', + coding: 'haiku', + qa: 'haiku' +}; + +export const QUICK_PHASE_THINKING: import('../types/settings').PhaseThinkingConfig = { + spec: 'low', + planning: 'low', + coding: 'low', + qa: 'low' +}; + +// Default phase configuration (used for fallback, matches 'Balanced' profile for cost-effectiveness) +export const DEFAULT_PHASE_MODELS: PhaseModelConfig = BALANCED_PHASE_MODELS; +export const DEFAULT_PHASE_THINKING: import('../types/settings').PhaseThinkingConfig = BALANCED_PHASE_THINKING; + // ============================================ // Feature Settings (Non-Pipeline Features) // ============================================ @@ -100,17 +150,17 @@ export const FEATURE_LABELS: Record = { - backlog: 'Planning', - in_progress: 'In Progress', - ai_review: 'AI Review', - human_review: 'Human Review', - done: 'Done' + backlog: 'columns.backlog', + in_progress: 'columns.in_progress', + ai_review: 'columns.ai_review', + human_review: 'columns.human_review', + done: 'columns.done' }; // Status colors for UI diff --git a/apps/frontend/src/shared/i18n/index.ts b/apps/frontend/src/shared/i18n/index.ts index 79c126e805..224924f3fb 100644 --- a/apps/frontend/src/shared/i18n/index.ts +++ b/apps/frontend/src/shared/i18n/index.ts @@ -11,6 +11,7 @@ import enOnboarding from './locales/en/onboarding.json'; import enDialogs from './locales/en/dialogs.json'; import enGitlab from './locales/en/gitlab.json'; import enTaskReview 
from './locales/en/taskReview.json'; +import enTerminal from './locales/en/terminal.json'; // Import French translation resources import frCommon from './locales/fr/common.json'; @@ -22,6 +23,7 @@ import frOnboarding from './locales/fr/onboarding.json'; import frDialogs from './locales/fr/dialogs.json'; import frGitlab from './locales/fr/gitlab.json'; import frTaskReview from './locales/fr/taskReview.json'; +import frTerminal from './locales/fr/terminal.json'; export const defaultNS = 'common'; @@ -35,7 +37,8 @@ export const resources = { onboarding: enOnboarding, dialogs: enDialogs, gitlab: enGitlab, - taskReview: enTaskReview + taskReview: enTaskReview, + terminal: enTerminal }, fr: { common: frCommon, @@ -46,7 +49,8 @@ export const resources = { onboarding: frOnboarding, dialogs: frDialogs, gitlab: frGitlab, - taskReview: frTaskReview + taskReview: frTaskReview, + terminal: frTerminal } } as const; @@ -57,7 +61,7 @@ i18n lng: 'en', // Default language (will be overridden by settings) fallbackLng: 'en', defaultNS, - ns: ['common', 'navigation', 'settings', 'tasks', 'welcome', 'onboarding', 'dialogs', 'gitlab', 'taskReview'], + ns: ['common', 'navigation', 'settings', 'tasks', 'welcome', 'onboarding', 'dialogs', 'gitlab', 'taskReview', 'terminal'], interpolation: { escapeValue: false // React already escapes values }, diff --git a/apps/frontend/src/shared/i18n/locales/en/common.json b/apps/frontend/src/shared/i18n/locales/en/common.json index c5e9be9e9d..426e32fd26 100644 --- a/apps/frontend/src/shared/i18n/locales/en/common.json +++ b/apps/frontend/src/shared/i18n/locales/en/common.json @@ -5,7 +5,23 @@ "hideArchived": "Hide archived", "showArchivedTasks": "Show archived tasks", "hideArchivedTasks": "Hide archived tasks", - "closeTab": "Close tab" + "closeTab": "Close tab", + "closeTabAriaLabel": "Close tab (removes project from app)", + "addProjectAriaLabel": "Add project" + }, + "accessibility": { + "deleteFeatureAriaLabel": "Delete feature", + "closeFeatureDetailsAriaLabel": "Close feature details", + "regenerateRoadmapAriaLabel": "Regenerate Roadmap", + "repositoryOwnerAriaLabel": "Repository owner", + "repositoryVisibilityAriaLabel": "Repository visibility", + "opensInNewWindow": "opens in new window", + "visitExternalLink": "Visit {{name}} (opens in new window)", + "upgradeSubscriptionAriaLabel": "Upgrade subscription (opens in new window)", + "learnMoreAriaLabel": "Learn more (opens in new window)", + "toggleFolder": "Toggle {{name}} folder", + "expandFolder": "Expand {{name}} folder", + "collapseFolder": "Collapse {{name}} folder" }, "buttons": { "save": "Save", @@ -37,6 +53,7 @@ "success": "Success", "initializing": "Initializing...", "saving": "Saving...", + "creating": "Creating...", "noData": "No data", "optional": "Optional", "required": "Required", @@ -144,14 +161,18 @@ "reviewStatus": "Review Status", "files": "files", "filesChanged": "{{count}} files changed", + "clickToViewFiles": "Click to view changed files", + "loadingFiles": "Loading files...", + "noFilesAvailable": "File list not available", "posting": "Posting...", "postingApproval": "Posting Approval...", "postFindings": "Post {{count}} Finding", "postFindings_plural": "Post {{count}} Findings", "approve": "Approve", "merge": "Merge", - "autoApprovePR": "Auto-Approve PR", - "suggestions": "+{{count}} suggestions", + "mergeViaGitHub": "Merge via GitHub CLI. 
May fail if branch protection rules require additional reviews or checks.", + "autoApprovePR": "Approve PR", + "suggestions": "with {{count}} suggestions", "postedFindings": "Posted {{count}} finding", "postedFindings_plural": "Posted {{count}} findings", "resolved": "{{count}} resolved", @@ -227,7 +248,19 @@ "findingsFoundSelectPost_plural": "{{count}} findings found. Select and post to GitHub.", "reviewLogs": "Review Logs", "followup": "Follow-up", - "initial": "Initial" + "initial": "Initial", + "rerunFollowup": "Re-run follow-up review", + "rerunReview": "Re-run review", + "loadingMore": "Loading more PRs...", + "scrollForMore": "Scroll for more", + "allPRsLoaded": "All PRs loaded", + "workflowsAwaitingApproval": "{{count}} Workflow Awaiting Approval", + "workflowsAwaitingApproval_plural": "{{count}} Workflows Awaiting Approval", + "blockedByWorkflows": "Blocked", + "workflowsAwaitingDescription": "This PR is from a fork and requires workflow approval before CI checks can run. Approve the workflows to continue.", + "viewOnGitHub": "View", + "approveWorkflow": "Approve", + "approveAllWorkflows": "Approve All Workflows" }, "downloads": { "toggleExpand": "Toggle download details", diff --git a/apps/frontend/src/shared/i18n/locales/en/dialogs.json b/apps/frontend/src/shared/i18n/locales/en/dialogs.json index 161d628b0b..6a3d468258 100644 --- a/apps/frontend/src/shared/i18n/locales/en/dialogs.json +++ b/apps/frontend/src/shared/i18n/locales/en/dialogs.json @@ -38,7 +38,13 @@ "branchDescription": "Choose which branch Auto Claude should use as the base for creating task branches.", "whyBranch": "Why select a branch?", "branchExplanation": "Auto Claude creates isolated workspaces for each task. Selecting the right base branch ensures your tasks start with the latest code from your main development line.", - "ready": "Auto Claude is ready to use! You can now create tasks that will be automatically based on the {{branchName}} branch." + "ready": "Auto Claude is ready to use! You can now create tasks that will be automatically based on the {{branchName}} branch.", + "createRepoAriaLabel": "Create new repository on GitHub", + "linkRepoAriaLabel": "Link to existing repository", + "goBackAriaLabel": "Go back to repository selection", + "selectOwnerAriaLabel": "Select {{owner}} as repository owner", + "selectOrgAriaLabel": "Select {{org}} as repository owner", + "selectVisibilityAriaLabel": "Set repository visibility to {{visibility}}" }, "worktrees": { "title": "Worktrees", @@ -109,7 +115,9 @@ "nameRequired": "Please enter a project name", "locationRequired": "Please select a location", "failedToOpen": "Failed to open project", - "failedToCreate": "Failed to create project" + "failedToCreate": "Failed to create project", + "openExistingAriaLabel": "Open existing project folder", + "createNewAriaLabel": "Create new project" }, "customModel": { "title": "Custom Model Configuration", diff --git a/apps/frontend/src/shared/i18n/locales/en/navigation.json b/apps/frontend/src/shared/i18n/locales/en/navigation.json index e598166f86..c161c85665 100644 --- a/apps/frontend/src/shared/i18n/locales/en/navigation.json +++ b/apps/frontend/src/shared/i18n/locales/en/navigation.json @@ -43,6 +43,7 @@ "latest": "Latest", "lastChecked": "Last checked", "learnMore": "Learn more about Claude Code", + "learnMoreAriaLabel": "Learn more about Claude Code (opens in new window)", "updateWarningTitle": "Update Claude Code?", "updateWarningDescription": "Updating will close all running Claude Code sessions. 
Any unsaved work in those sessions may be lost. Make sure to save your work before proceeding.", "updateAnyway": "Update Anyway" diff --git a/apps/frontend/src/shared/i18n/locales/en/onboarding.json b/apps/frontend/src/shared/i18n/locales/en/onboarding.json index 3852575591..d2b5f77c93 100644 --- a/apps/frontend/src/shared/i18n/locales/en/onboarding.json +++ b/apps/frontend/src/shared/i18n/locales/en/onboarding.json @@ -36,7 +36,42 @@ }, "memory": { "title": "Memory", - "description": "Auto Claude Memory helps remember context across your coding sessions" + "description": "Configure persistent cross-session memory for agents", + "contextDescription": "Auto Claude Memory helps remember context across your coding sessions", + "enableMemory": "Enable Memory", + "enableMemoryDescription": "Persistent cross-session memory using LadybugDB (embedded database)", + "memoryDisabledInfo": "Memory is disabled. Session insights will be stored in local files only. Enable Memory for persistent cross-session context with semantic search.", + "enableAgentAccess": "Enable Agent Memory Access", + "enableAgentAccessDescription": "Allow agents to search and add to the knowledge graph via MCP", + "mcpServerUrl": "Graphiti MCP Server URL", + "mcpServerUrlDescription": "URL of the Graphiti MCP server for agent memory access", + "embeddingProvider": "Embedding Provider", + "embeddingProviderDescription": "Provider for semantic search (optional - keyword search works without)", + "selectEmbeddingModel": "Select Embedding Model", + "openaiApiKey": "OpenAI API Key", + "openaiApiKeyDescription": "Required for OpenAI embeddings", + "openaiGetKey": "Get your key from", + "voyageApiKey": "Voyage AI API Key", + "voyageApiKeyDescription": "Required for Voyage AI embeddings", + "googleApiKey": "Google AI API Key", + "googleApiKeyDescription": "Required for Google AI embeddings", + "azureConfig": "Azure OpenAI Configuration", + "azureApiKey": "API Key", + "azureBaseUrl": "Base URL", + "azureEmbeddingDeployment": "Embedding Deployment Name", + "memoryInfo": "Memory stores discoveries, patterns, and insights about your codebase so future sessions start with context already loaded. 
No Docker required - uses an embedded database.", + "learnMore": "Learn more about Memory", + "back": "Back", + "skip": "Skip", + "saving": "Saving...", + "saveAndContinue": "Save & Continue", + "providers": { + "ollama": "Ollama (Local - Free)", + "openai": "OpenAI", + "voyage": "Voyage AI", + "google": "Google AI", + "azure": "Azure OpenAI" + } }, "completion": { "title": "You're All Set!", @@ -63,12 +98,35 @@ }, "steps": { "welcome": "Welcome", + "authChoice": "Auth Method", "auth": "Auth", "claudeCode": "CLI", "devtools": "Dev Tools", + "privacy": "Privacy", "memory": "Memory", "done": "Done" }, + "privacy": { + "title": "Help Improve Auto Claude", + "subtitle": "Anonymous error reporting helps us fix bugs faster", + "whatWeCollect": { + "title": "What we collect", + "crashReports": "Crash reports and error stack traces", + "errorMessages": "Error messages (with file paths anonymized)", + "appVersion": "App version and platform info" + }, + "whatWeNeverCollect": { + "title": "What we never collect", + "code": "Your code or project files", + "filenames": "Full file paths (usernames are masked)", + "apiKeys": "API keys or tokens", + "personalData": "Personal information or usage data" + }, + "toggle": { + "label": "Send anonymous error reports", + "description": "Help us identify and fix issues" + } + }, "claudeCode": { "title": "Claude Code CLI", "description": "Install or update the Claude Code CLI to enable AI-powered features", diff --git a/apps/frontend/src/shared/i18n/locales/en/settings.json b/apps/frontend/src/shared/i18n/locales/en/settings.json index a39a135ec1..c102d26ac2 100644 --- a/apps/frontend/src/shared/i18n/locales/en/settings.json +++ b/apps/frontend/src/shared/i18n/locales/en/settings.json @@ -33,6 +33,10 @@ "title": "Integrations", "description": "API keys & Claude accounts" }, + "api-profiles": { + "title": "API Profiles", + "description": "Custom API endpoint profiles" + }, "updates": { "title": "Updates", "description": "Auto Claude updates" @@ -46,6 +50,128 @@ "description": "Troubleshooting tools" } }, + "apiProfiles": { + "title": "API Profiles", + "description": "Configure custom Anthropic-compatible API endpoints", + "addButton": "Add Profile", + "presets": { + "anthropic": "Anthropic", + "openrouter": "OpenRouter", + "groq": "Groq", + "glmGlobal": "GLM (Global)", + "glmChina": "GLM (China)" + }, + "fields": { + "name": "Name", + "preset": "Preset", + "baseUrl": "Base URL", + "apiKey": "API Key" + }, + "placeholders": { + "name": "My Custom API", + "preset": "Choose a provider preset", + "baseUrl": "https://api.anthropic.com", + "apiKey": "sk-ant-..." + }, + "hints": { + "preset": "Presets fill the base URL; you still need to paste your API key.", + "baseUrl": "Example: https://api.anthropic.com or http://localhost:8080" + }, + "validation": { + "nameRequired": "Name is required", + "baseUrlRequired": "Base URL is required", + "baseUrlInvalid": "Invalid URL format (must be http:// or https://)", + "apiKeyRequired": "API Key is required", + "apiKeyInvalid": "Invalid API Key format" + }, + "actions": { + "save": "Save Profile", + "saving": "Saving...", + "cancel": "Cancel", + "changeKey": "Change", + "cancelKeyChange": "Cancel" + }, + "testConnection": { + "label": "Test Connection", + "testing": "Testing...", + "success": "Connection Successful", + "failure": "Connection Failed" + }, + "models": { + "title": "Optional: Model Name Mappings", + "description": "Select models from your API provider. 
Leave blank to use defaults.", + "defaultLabel": "Default Model (Optional)", + "haikuLabel": "Haiku Model (Optional)", + "sonnetLabel": "Sonnet Model (Optional)", + "opusLabel": "Opus Model (Optional)", + "defaultPlaceholder": "e.g., claude-3-5-sonnet-20241022", + "haikuPlaceholder": "e.g., claude-3-5-haiku-20241022", + "sonnetPlaceholder": "e.g., claude-3-5-sonnet-20241022", + "opusPlaceholder": "e.g., claude-3-5-opus-20241022" + }, + "empty": { + "title": "No API profiles configured", + "description": "Create a profile to configure custom API endpoints for your builds.", + "action": "Create First Profile" + }, + "switchToOauth": { + "label": "Switch to OAuth", + "loading": "Switching..." + }, + "activeBadge": "Active", + "customModels": "Custom models: {{models}}", + "setActive": { + "label": "Set Active", + "loading": "Setting..." + }, + "tooltips": { + "edit": "Edit profile", + "deleteActive": "Switch to OAuth before deleting", + "deleteInactive": "Delete profile" + }, + "deleteAriaLabel": "Delete profile {{name}}", + "toast": { + "create": { + "title": "Profile created", + "description": "\"{{name}}\" has been added successfully." + }, + "update": { + "title": "Profile updated", + "description": "\"{{name}}\" has been updated successfully." + }, + "delete": { + "title": "Profile deleted", + "description": "\"{{name}}\" has been removed.", + "errorTitle": "Failed to delete profile", + "errorFallback": "An error occurred while deleting the profile." + }, + "switch": { + "oauthTitle": "Switched to OAuth", + "oauthDescription": "Now using OAuth authentication", + "profileTitle": "Profile activated", + "profileDescription": "Now using {{name}}", + "errorTitle": "Failed to switch authentication", + "errorFallback": "An error occurred while switching authentication method." + } + }, + "dialog": { + "createTitle": "Add API Profile", + "editTitle": "Edit Profile", + "description": "Configure a custom Anthropic-compatible API endpoint for your builds.", + "deleteTitle": "Delete Profile?", + "deleteDescription": "Are you sure you want to delete \"{{name}}\"? This action cannot be undone.", + "cancel": "Cancel", + "delete": "Delete", + "deleting": "Deleting..." + } + }, + "modelSelect": { + "placeholder": "Select a model or type manually", + "placeholderManual": "Enter model name (e.g., claude-3-5-sonnet-20241022)", + "searchPlaceholder": "Search models...", + "noResults": "No models match your search", + "discoveryNotAvailable": "Model discovery not available. Enter model name manually." + }, "language": { "label": "Interface Language", "description": "Select the language for the application interface" @@ -91,6 +217,7 @@ "sourceUserConfig": "User Configuration", "sourceVenv": "Virtual Environment", "sourceHomebrew": "Homebrew", + "sourceNvm": "NVM", "sourceSystemPath": "System PATH", "sourceBundled": "Bundled", "sourceFallback": "Fallback", @@ -157,7 +284,11 @@ "autoUpdateProjects": "Auto-Update Projects", "autoUpdateProjectsDescription": "Automatically update Auto Claude in projects when a new version is available", "betaUpdates": "Beta Updates", - "betaUpdatesDescription": "Receive pre-release beta versions with new features (may be less stable)" + "betaUpdatesDescription": "Receive pre-release beta versions with new features (may be less stable)", + "stableDowngradeAvailable": "Stable Version Available", + "stableDowngradeDescription": "You're currently on a beta version. 
Since you've disabled beta updates, you can switch to the latest stable release.", + "stableVersion": "Stable Version", + "downloadStableVersion": "Download Stable Version" }, "notifications": { "title": "Notifications", @@ -234,7 +365,9 @@ "selectThinkingLevel": "Select thinking level", "perPhaseOptimization": "(per-phase optimization)", "resetToDefaults": "Reset to defaults", - "phaseConfigNote": "These settings will be used as defaults when creating new tasks with the Auto profile. You can override them per-task in the task creation wizard.", + "resetToProfileDefaults": "Reset to {{profile}} defaults", + "customized": "Customized", + "phaseConfigNote": "These settings will be used as defaults when creating new tasks with this profile. You can override them per-task in the task creation wizard.", "phases": { "spec": { "label": "Spec Creation", @@ -308,6 +441,10 @@ "debug": { "title": "Debug & Logs", "description": "Access logs and debug information for troubleshooting", + "errorReporting": { + "label": "Anonymous Error Reporting", + "description": "Send crash reports to help improve Auto Claude. No personal data or code is collected." + }, "openLogsFolder": "Open Logs Folder", "copyDebugInfo": "Copy Debug Info", "copied": "Copied!", diff --git a/apps/frontend/src/shared/i18n/locales/en/tasks.json b/apps/frontend/src/shared/i18n/locales/en/tasks.json index 8602b8a2e7..d49d925bf7 100644 --- a/apps/frontend/src/shared/i18n/locales/en/tasks.json +++ b/apps/frontend/src/shared/i18n/locales/en/tasks.json @@ -14,7 +14,9 @@ "resume": "Resume", "archive": "Archive", "delete": "Delete", - "view": "View Details" + "view": "View Details", + "moveTo": "Move to", + "taskActions": "Task actions" }, "labels": { "running": "Running", @@ -46,6 +48,13 @@ "title": "No tasks yet", "description": "Create your first task to get started" }, + "columns": { + "backlog": "Planning", + "in_progress": "In Progress", + "ai_review": "AI Review", + "human_review": "Human Review", + "done": "Done" + }, "kanban": { "emptyBacklog": "No tasks planned", "emptyBacklogHint": "Add a task to get started", @@ -59,7 +68,11 @@ "emptyDoneHint": "Approved tasks appear here", "emptyDefault": "No tasks", "dropHere": "Drop here", - "showArchived": "Show archived" + "showArchived": "Show archived", + "addTaskAriaLabel": "Add new task to backlog", + "closeTaskDetailsAriaLabel": "Close task details", + "editTask": "Edit task", + "cannotEditWhileRunning": "Cannot edit while task is running" }, "execution": { "phases": { @@ -95,5 +108,22 @@ "retry": "Retry", "selectFile": "Select a file to view its contents", "openInIDE": "Open in IDE" + }, + "metadata": { + "severity": "severity" + }, + "images": { + "removeImageAriaLabel": "Remove image {{filename}}", + "pasteHint": "Tip: Paste screenshots directly with {{shortcut}} to add reference images." + }, + "notifications": { + "backgroundTaskTitle": "Task continues in background", + "backgroundTaskDescription": "The task is still running. You can reopen this dialog to monitor progress." + }, + "wizard": { + "gitOptions": { + "useWorktreeLabel": "Use isolated workspace (recommended)", + "useWorktreeDescription": "Creates changes in a separate git worktree for safe review before merging. Disable to build directly in your project (faster but riskier)." 
+ } } } diff --git a/apps/frontend/src/shared/i18n/locales/en/terminal.json b/apps/frontend/src/shared/i18n/locales/en/terminal.json new file mode 100644 index 0000000000..b29808a2f0 --- /dev/null +++ b/apps/frontend/src/shared/i18n/locales/en/terminal.json @@ -0,0 +1,36 @@ +{ + "expand": { + "expand": "Expand terminal", + "collapse": "Collapse terminal" + }, + "auth": { + "terminalTitle": "Auth: {{profileName}}", + "maxTerminalsReached": "Cannot open auth terminal: maximum terminals reached. Close a terminal first." + }, + "worktree": { + "create": "Worktree", + "createNew": "New Worktree", + "existing": "Existing Worktrees", + "createTitle": "Create Terminal Worktree", + "createDescription": "Create an isolated workspace for this terminal. All work will happen in the worktree directory.", + "name": "Worktree Name", + "namePlaceholder": "my-feature", + "nameRequired": "Worktree name is required", + "nameInvalid": "Name must start and end with a letter or number", + "nameHelp": "Lowercase letters, numbers, dashes, and underscores only", + "associateTask": "Link to Task", + "selectTask": "Select a task...", + "noTask": "No task (standalone worktree)", + "createBranch": "Create Git Branch", + "branchHelp": "Creates branch: {{branch}}", + "baseBranch": "Base Branch", + "selectBaseBranch": "Select base branch...", + "useProjectDefault": "Use project default ({{branch}})", + "baseBranchHelp": "The branch to create the worktree from", + "openInIDE": "Open in IDE", + "maxReached": "Maximum of 12 terminal worktrees reached", + "alreadyExists": "A worktree with this name already exists", + "deleteTitle": "Delete Worktree?", + "deleteDescription": "This will permanently delete the worktree and its branch. Any uncommitted changes will be lost." + } +} diff --git a/apps/frontend/src/shared/i18n/locales/en/welcome.json b/apps/frontend/src/shared/i18n/locales/en/welcome.json index 9bc04eff29..7c67fee7c2 100644 --- a/apps/frontend/src/shared/i18n/locales/en/welcome.json +++ b/apps/frontend/src/shared/i18n/locales/en/welcome.json @@ -11,6 +11,7 @@ "title": "Recent Projects", "empty": "No projects yet", "emptyDescription": "Create a new project or open an existing one to get started", - "openFolder": "Open Folder" + "openFolder": "Open Folder", + "openProjectAriaLabel": "Open project {{name}}" } } diff --git a/apps/frontend/src/shared/i18n/locales/fr/common.json b/apps/frontend/src/shared/i18n/locales/fr/common.json index 4c261afe7f..dbec9b0285 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/common.json +++ b/apps/frontend/src/shared/i18n/locales/fr/common.json @@ -5,7 +5,23 @@ "hideArchived": "Masquer archivรฉs", "showArchivedTasks": "Afficher les tรขches archivรฉes", "hideArchivedTasks": "Masquer les tรขches archivรฉes", - "closeTab": "Fermer l'onglet" + "closeTab": "Fermer l'onglet", + "closeTabAriaLabel": "Fermer l'onglet (retire le projet de l'application)", + "addProjectAriaLabel": "Ajouter un projet" + }, + "accessibility": { + "deleteFeatureAriaLabel": "Supprimer la fonctionnalitรฉ", + "closeFeatureDetailsAriaLabel": "Fermer les dรฉtails de la fonctionnalitรฉ", + "regenerateRoadmapAriaLabel": "Rรฉgรฉnรฉrer la feuille de route", + "repositoryOwnerAriaLabel": "Propriรฉtaire du dรฉpรดt", + "repositoryVisibilityAriaLabel": "Visibilitรฉ du dรฉpรดt", + "opensInNewWindow": "s'ouvre dans une nouvelle fenรชtre", + "visitExternalLink": "Visiter {{name}} (s'ouvre dans une nouvelle fenรชtre)", + "upgradeSubscriptionAriaLabel": "Mettre ร  niveau l'abonnement (s'ouvre dans une nouvelle fenรชtre)", + 
"learnMoreAriaLabel": "En savoir plus (s'ouvre dans une nouvelle fenรชtre)", + "toggleFolder": "Basculer le dossier {{name}}", + "expandFolder": "Dรฉplier le dossier {{name}}", + "collapseFolder": "Replier le dossier {{name}}" }, "buttons": { "save": "Enregistrer", @@ -37,6 +53,7 @@ "success": "Succรจs", "initializing": "Initialisation...", "saving": "Enregistrement...", + "creating": "Creation...", "noData": "Aucune donnรฉe", "optional": "Optionnel", "required": "Requis", @@ -144,14 +161,18 @@ "reviewStatus": "Statut de rรฉvision", "files": "fichiers", "filesChanged": "{{count}} fichiers modifiรฉs", + "clickToViewFiles": "Cliquez pour voir les fichiers modifiรฉs", + "loadingFiles": "Chargement des fichiers...", + "noFilesAvailable": "Liste des fichiers non disponible", "posting": "Publication...", "postingApproval": "Publication de l'approbation...", "postFindings": "Publier {{count}} rรฉsultat", "postFindings_plural": "Publier {{count}} rรฉsultats", "approve": "Approuver", "merge": "Fusionner", - "autoApprovePR": "Auto-approuver PR", - "suggestions": "+{{count}} suggestions", + "mergeViaGitHub": "Fusionner via GitHub CLI. Peut รฉchouer si les rรจgles de protection de branche nรฉcessitent des rรฉvisions ou vรฉrifications supplรฉmentaires.", + "autoApprovePR": "Approuver PR", + "suggestions": "avec {{count}} suggestions", "postedFindings": "{{count}} rรฉsultat publiรฉ", "postedFindings_plural": "{{count}} rรฉsultats publiรฉs", "resolved": "{{count}} rรฉsolu", @@ -227,7 +248,19 @@ "findingsFoundSelectPost_plural": "{{count}} rรฉsultats trouvรฉs. Sรฉlectionnez et publiez sur GitHub.", "reviewLogs": "Journaux de rรฉvision", "followup": "Suivi", - "initial": "Initial" + "initial": "Initial", + "rerunFollowup": "Relancer la revue de suivi", + "rerunReview": "Relancer la revue", + "loadingMore": "Chargement des PRs...", + "scrollForMore": "Dรฉfiler pour plus", + "allPRsLoaded": "Tous les PRs chargรฉs", + "workflowsAwaitingApproval": "{{count}} workflow en attente d'approbation", + "workflowsAwaitingApproval_plural": "{{count}} workflows en attente d'approbation", + "blockedByWorkflows": "Bloquรฉ", + "workflowsAwaitingDescription": "Cette PR provient d'un fork et nรฉcessite l'approbation des workflows avant que les vรฉrifications CI puissent s'exรฉcuter. Approuvez les workflows pour continuer.", + "viewOnGitHub": "Voir", + "approveWorkflow": "Approuver", + "approveAllWorkflows": "Approuver tous les workflows" }, "downloads": { "toggleExpand": "Afficher/masquer les dรฉtails", diff --git a/apps/frontend/src/shared/i18n/locales/fr/dialogs.json b/apps/frontend/src/shared/i18n/locales/fr/dialogs.json index e51972c69b..e2bf66489b 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/dialogs.json +++ b/apps/frontend/src/shared/i18n/locales/fr/dialogs.json @@ -38,7 +38,13 @@ "branchDescription": "Choisissez quelle branche Auto Claude doit utiliser comme base pour crรฉer les branches de tรขches.", "whyBranch": "Pourquoi sรฉlectionner une branche ?", "branchExplanation": "Auto Claude crรฉe des espaces de travail isolรฉs pour chaque tรขche. Sรฉlectionner la bonne branche de base garantit que vos tรขches dรฉmarrent avec le code le plus rรฉcent de votre ligne de dรฉveloppement principale.", - "ready": "Auto Claude est prรชt ร  l'emploi ! Vous pouvez maintenant crรฉer des tรขches qui seront automatiquement basรฉes sur la branche {{branchName}}." + "ready": "Auto Claude est prรชt ร  l'emploi ! 
Vous pouvez maintenant crรฉer des tรขches qui seront automatiquement basรฉes sur la branche {{branchName}}.", + "createRepoAriaLabel": "Crรฉer un nouveau dรฉpรดt sur GitHub", + "linkRepoAriaLabel": "Lier ร  un dรฉpรดt existant", + "goBackAriaLabel": "Retourner ร  la sรฉlection du dรฉpรดt", + "selectOwnerAriaLabel": "Sรฉlectionner {{owner}} comme propriรฉtaire du dรฉpรดt", + "selectOrgAriaLabel": "Sรฉlectionner {{org}} comme propriรฉtaire du dรฉpรดt", + "selectVisibilityAriaLabel": "Dรฉfinir la visibilitรฉ du dรฉpรดt sur {{visibility}}" }, "worktrees": { "title": "Worktrees", @@ -109,7 +115,9 @@ "nameRequired": "Veuillez entrer un nom de projet", "locationRequired": "Veuillez sรฉlectionner un emplacement", "failedToOpen": "ร‰chec de l'ouverture du projet", - "failedToCreate": "ร‰chec de la crรฉation du projet" + "failedToCreate": "ร‰chec de la crรฉation du projet", + "openExistingAriaLabel": "Ouvrir un dossier de projet existant", + "createNewAriaLabel": "Crรฉer un nouveau projet" }, "customModel": { "title": "Configuration du modรจle personnalisรฉ", diff --git a/apps/frontend/src/shared/i18n/locales/fr/navigation.json b/apps/frontend/src/shared/i18n/locales/fr/navigation.json index 3ad44f0ee8..2f47585fe8 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/navigation.json +++ b/apps/frontend/src/shared/i18n/locales/fr/navigation.json @@ -43,6 +43,7 @@ "latest": "Derniรจre", "lastChecked": "Derniรจre vรฉrification", "learnMore": "En savoir plus sur Claude Code", + "learnMoreAriaLabel": "En savoir plus sur Claude Code (s'ouvre dans une nouvelle fenรชtre)", "updateWarningTitle": "Mettre ร  jour Claude Code ?", "updateWarningDescription": "La mise ร  jour fermera toutes les sessions Claude Code en cours. Tout travail non sauvegardรฉ dans ces sessions pourrait รชtre perdu. Assurez-vous de sauvegarder votre travail avant de continuer.", "updateAnyway": "Mettre ร  jour quand mรชme" diff --git a/apps/frontend/src/shared/i18n/locales/fr/onboarding.json b/apps/frontend/src/shared/i18n/locales/fr/onboarding.json index 1a05ac0423..c494115c48 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/onboarding.json +++ b/apps/frontend/src/shared/i18n/locales/fr/onboarding.json @@ -36,7 +36,42 @@ }, "memory": { "title": "Mรฉmoire", - "description": "La mรฉmoire Auto Claude aide ร  retenir le contexte entre vos sessions de code" + "description": "Configurer la mรฉmoire persistante entre sessions pour les agents", + "contextDescription": "La mรฉmoire Auto Claude aide ร  retenir le contexte entre vos sessions de code", + "enableMemory": "Activer la mรฉmoire", + "enableMemoryDescription": "Mรฉmoire persistante entre sessions utilisant LadybugDB (base de donnรฉes intรฉgrรฉe)", + "memoryDisabledInfo": "La mรฉmoire est dรฉsactivรฉe. Les informations de session seront stockรฉes uniquement dans des fichiers locaux. 
Activez la mรฉmoire pour un contexte persistant entre sessions avec recherche sรฉmantique.", + "enableAgentAccess": "Activer l'accรจs mรฉmoire des agents", + "enableAgentAccessDescription": "Permettre aux agents de rechercher et d'ajouter au graphe de connaissances via MCP", + "mcpServerUrl": "URL du serveur Graphiti MCP", + "mcpServerUrlDescription": "URL du serveur Graphiti MCP pour l'accรจs mรฉmoire des agents", + "embeddingProvider": "Fournisseur d'embeddings", + "embeddingProviderDescription": "Fournisseur pour la recherche sรฉmantique (optionnel - la recherche par mots-clรฉs fonctionne sans)", + "selectEmbeddingModel": "Sรฉlectionner le modรจle d'embedding", + "openaiApiKey": "Clรฉ API OpenAI", + "openaiApiKeyDescription": "Requise pour les embeddings OpenAI", + "openaiGetKey": "Obtenez votre clรฉ sur", + "voyageApiKey": "Clรฉ API Voyage AI", + "voyageApiKeyDescription": "Requise pour les embeddings Voyage AI", + "googleApiKey": "Clรฉ API Google AI", + "googleApiKeyDescription": "Requise pour les embeddings Google AI", + "azureConfig": "Configuration Azure OpenAI", + "azureApiKey": "Clรฉ API", + "azureBaseUrl": "URL de base", + "azureEmbeddingDeployment": "Nom du dรฉploiement d'embedding", + "memoryInfo": "La mรฉmoire stocke les dรฉcouvertes, motifs et informations sur votre codebase pour que les futures sessions dรฉmarrent avec le contexte dรฉjร  chargรฉ. Pas de Docker requis - utilise une base de donnรฉes intรฉgrรฉe.", + "learnMore": "En savoir plus sur la mรฉmoire", + "back": "Retour", + "skip": "Passer", + "saving": "Enregistrement...", + "saveAndContinue": "Enregistrer et continuer", + "providers": { + "ollama": "Ollama (Local - Gratuit)", + "openai": "OpenAI", + "voyage": "Voyage AI", + "google": "Google AI", + "azure": "Azure OpenAI" + } }, "completion": { "title": "Vous รชtes prรชt !", @@ -63,12 +98,35 @@ }, "steps": { "welcome": "Bienvenue", + "authChoice": "Mรฉthode d'auth", "auth": "Auth", "claudeCode": "CLI", "devtools": "Outils dev", + "privacy": "Confidentialitรฉ", "memory": "Mรฉmoire", "done": "Terminรฉ" }, + "privacy": { + "title": "Aidez ร  amรฉliorer Auto Claude", + "subtitle": "Les rapports d'erreurs anonymes nous aident ร  corriger les bugs plus rapidement", + "whatWeCollect": { + "title": "Ce que nous collectons", + "crashReports": "Rapports de crash et traces d'erreurs", + "errorMessages": "Messages d'erreur (avec chemins de fichiers anonymisรฉs)", + "appVersion": "Version de l'app et informations systรจme" + }, + "whatWeNeverCollect": { + "title": "Ce que nous ne collectons jamais", + "code": "Votre code ou fichiers de projet", + "filenames": "Chemins de fichiers complets (noms d'utilisateur masquรฉs)", + "apiKeys": "Clรฉs API ou jetons", + "personalData": "Informations personnelles ou donnรฉes d'utilisation" + }, + "toggle": { + "label": "Envoyer des rapports d'erreurs anonymes", + "description": "Aidez-nous ร  identifier et corriger les problรจmes" + } + }, "claudeCode": { "title": "Claude Code CLI", "description": "Installez ou mettez ร  jour le CLI Claude Code pour activer les fonctionnalitรฉs IA", diff --git a/apps/frontend/src/shared/i18n/locales/fr/settings.json b/apps/frontend/src/shared/i18n/locales/fr/settings.json index 4e37397114..ab972347de 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/settings.json +++ b/apps/frontend/src/shared/i18n/locales/fr/settings.json @@ -33,6 +33,10 @@ "title": "Intรฉgrations", "description": "Clรฉs API & comptes Claude" }, + "api-profiles": { + "title": "Profils API", + "description": "Profils d'endpoint API 
personnalisรฉs" + }, "updates": { "title": "Mises ร  jour", "description": "Mises ร  jour Auto Claude" @@ -46,6 +50,128 @@ "description": "Outils de dรฉpannage" } }, + "apiProfiles": { + "title": "Profils API", + "description": "Configurez des endpoints API compatibles Anthropic personnalisรฉs", + "addButton": "Ajouter un profil", + "presets": { + "anthropic": "Anthropic", + "openrouter": "OpenRouter", + "groq": "Groq", + "glmGlobal": "GLM (Global)", + "glmChina": "GLM (China)" + }, + "fields": { + "name": "Nom", + "preset": "Prรฉrรฉglage", + "baseUrl": "URL de base", + "apiKey": "Clรฉ API" + }, + "placeholders": { + "name": "Mon API personnalisรฉe", + "preset": "Choisir un prรฉrรฉglage de fournisseur", + "baseUrl": "https://api.anthropic.com", + "apiKey": "sk-ant-..." + }, + "hints": { + "preset": "Les prรฉrรฉglages remplissent l'URL de base ; vous devez toujours coller votre clรฉ API.", + "baseUrl": "Exemple : https://api.anthropic.com ou http://localhost:8080" + }, + "validation": { + "nameRequired": "Le nom est requis", + "baseUrlRequired": "L'URL de base est requise", + "baseUrlInvalid": "Format d'URL invalide (doit รชtre http:// ou https://)", + "apiKeyRequired": "La clรฉ API est requise", + "apiKeyInvalid": "Format de clรฉ API invalide" + }, + "actions": { + "save": "Enregistrer le profil", + "saving": "Enregistrement...", + "cancel": "Annuler", + "changeKey": "Modifier", + "cancelKeyChange": "Annuler" + }, + "testConnection": { + "label": "Tester la connexion", + "testing": "Test en cours...", + "success": "Connexion rรฉussie", + "failure": "ร‰chec de la connexion" + }, + "models": { + "title": "Optionnel : correspondance des noms de modรจles", + "description": "Sรฉlectionnez des modรจles auprรจs de votre fournisseur d'API. Laissez vide pour utiliser les valeurs par dรฉfaut.", + "defaultLabel": "Modรจle par dรฉfaut (optionnel)", + "haikuLabel": "Modรจle Haiku (optionnel)", + "sonnetLabel": "Modรจle Sonnet (optionnel)", + "opusLabel": "Modรจle Opus (optionnel)", + "defaultPlaceholder": "ex. : claude-3-5-sonnet-20241022", + "haikuPlaceholder": "ex. : claude-3-5-haiku-20241022", + "sonnetPlaceholder": "ex. : claude-3-5-sonnet-20241022", + "opusPlaceholder": "ex. : claude-3-5-opus-20241022" + }, + "empty": { + "title": "Aucun profil API configurรฉ", + "description": "Crรฉez un profil pour configurer des endpoints API personnalisรฉs pour vos builds.", + "action": "Crรฉer le premier profil" + }, + "switchToOauth": { + "label": "Passer ร  OAuth", + "loading": "Basculement..." + }, + "activeBadge": "Actif", + "customModels": "Modรจles personnalisรฉs : {{models}}", + "setActive": { + "label": "Dรฉfinir comme actif", + "loading": "Activation..." + }, + "tooltips": { + "edit": "Modifier le profil", + "deleteActive": "Passez ร  OAuth avant de supprimer", + "deleteInactive": "Supprimer le profil" + }, + "deleteAriaLabel": "Supprimer le profil {{name}}", + "toast": { + "create": { + "title": "Profil crรฉรฉ", + "description": "\"{{name}}\" a รฉtรฉ ajoutรฉ avec succรจs." + }, + "update": { + "title": "Profil mis ร  jour", + "description": "\"{{name}}\" a รฉtรฉ mis ร  jour avec succรจs." + }, + "delete": { + "title": "Profil supprimรฉ", + "description": "\"{{name}}\" a รฉtรฉ supprimรฉ.", + "errorTitle": "ร‰chec de la suppression du profil", + "errorFallback": "Une erreur s'est produite lors de la suppression du profil." 
+ }, + "switch": { + "oauthTitle": "Passรฉ ร  OAuth", + "oauthDescription": "Authentification OAuth utilisรฉe", + "profileTitle": "Profil activรฉ", + "profileDescription": "Utilisation de {{name}}", + "errorTitle": "ร‰chec du changement d'authentification", + "errorFallback": "Une erreur s'est produite lors du changement de mรฉthode d'authentification." + } + }, + "dialog": { + "createTitle": "Ajouter un profil API", + "editTitle": "Modifier le profil", + "description": "Configurez un endpoint API compatible Anthropic personnalisรฉ pour vos builds.", + "deleteTitle": "Supprimer le profil ?", + "deleteDescription": "รŠtes-vous sรปr de vouloir supprimer \"{{name}}\" ? Cette action est irrรฉversible.", + "cancel": "Annuler", + "delete": "Supprimer", + "deleting": "Suppression..." + } + }, + "modelSelect": { + "placeholder": "Sรฉlectionner un modรจle ou saisir manuellement", + "placeholderManual": "Saisir le nom du modรจle (ex. : claude-3-5-sonnet-20241022)", + "searchPlaceholder": "Rechercher des modรจles...", + "noResults": "Aucun modรจle ne correspond ร  votre recherche", + "discoveryNotAvailable": "Dรฉcouverte de modรจles indisponible. Saisissez le nom du modรจle manuellement." + }, "language": { "label": "Langue de l'interface", "description": "Sรฉlectionnez la langue de l'interface de l'application" @@ -91,6 +217,7 @@ "sourceUserConfig": "Configuration utilisateur", "sourceVenv": "Environnement virtuel", "sourceHomebrew": "Homebrew", + "sourceNvm": "NVM", "sourceSystemPath": "PATH systรจme", "sourceBundled": "Intรฉgrรฉ", "sourceFallback": "Valeur par dรฉfaut", @@ -157,7 +284,11 @@ "autoUpdateProjects": "Mise ร  jour automatique des projets", "autoUpdateProjectsDescription": "Mettre ร  jour automatiquement Auto Claude dans les projets quand une nouvelle version est disponible", "betaUpdates": "Mises ร  jour bรชta", - "betaUpdatesDescription": "Recevoir les versions bรชta prรฉ-release avec de nouvelles fonctionnalitรฉs (peut รชtre moins stable)" + "betaUpdatesDescription": "Recevoir les versions bรชta prรฉ-release avec de nouvelles fonctionnalitรฉs (peut รชtre moins stable)", + "stableDowngradeAvailable": "Version stable disponible", + "stableDowngradeDescription": "Vous รชtes actuellement sur une version bรชta. Comme vous avez dรฉsactivรฉ les mises ร  jour bรชta, vous pouvez passer ร  la derniรจre version stable.", + "stableVersion": "Version stable", + "downloadStableVersion": "Tรฉlรฉcharger la version stable" }, "notifications": { "title": "Notifications", @@ -234,7 +365,9 @@ "selectThinkingLevel": "Sรฉlectionner un niveau de rรฉflexion", "perPhaseOptimization": "(optimisation par phase)", "resetToDefaults": "Rรฉinitialiser par dรฉfaut", - "phaseConfigNote": "Ces paramรจtres seront utilisรฉs par dรฉfaut lors de la crรฉation de nouvelles tรขches avec le profil Auto. Vous pouvez les modifier par tรขche dans l'assistant de crรฉation.", + "resetToProfileDefaults": "Rรฉinitialiser aux dรฉfauts de {{profile}}", + "customized": "Personnalisรฉ", + "phaseConfigNote": "Ces paramรจtres seront utilisรฉs par dรฉfaut lors de la crรฉation de nouvelles tรขches avec ce profil. Vous pouvez les modifier par tรขche dans l'assistant de crรฉation.", "phases": { "spec": { "label": "Crรฉation de spec", @@ -308,6 +441,10 @@ "debug": { "title": "Debug & Logs", "description": "Accรฉdez aux logs et informations de dรฉbogage pour le dรฉpannage", + "errorReporting": { + "label": "Rapports d'erreurs anonymes", + "description": "Envoyer des rapports de crash pour amรฉliorer Auto Claude. 
Aucune donnรฉe personnelle ni code n'est collectรฉ." + }, "openLogsFolder": "Ouvrir le dossier des logs", "copyDebugInfo": "Copier les infos de dรฉbogage", "copied": "Copiรฉ !", diff --git a/apps/frontend/src/shared/i18n/locales/fr/tasks.json b/apps/frontend/src/shared/i18n/locales/fr/tasks.json index ea3e5b38fd..20f7eb5c98 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/tasks.json +++ b/apps/frontend/src/shared/i18n/locales/fr/tasks.json @@ -14,7 +14,9 @@ "resume": "Reprendre", "archive": "Archiver", "delete": "Supprimer", - "view": "Voir les dรฉtails" + "view": "Voir les dรฉtails", + "moveTo": "Dรฉplacer vers", + "taskActions": "Actions de la tรขche" }, "labels": { "running": "En cours", @@ -46,6 +48,13 @@ "title": "Aucune tรขche", "description": "Crรฉez votre premiรจre tรขche pour commencer" }, + "columns": { + "backlog": "Planification", + "in_progress": "En cours", + "ai_review": "Rรฉvision IA", + "human_review": "Rรฉvision humaine", + "done": "Terminรฉ" + }, "kanban": { "emptyBacklog": "Aucune tรขche planifiรฉe", "emptyBacklogHint": "Ajoutez une tรขche pour commencer", @@ -59,7 +68,11 @@ "emptyDoneHint": "Les tรขches approuvรฉes apparaissent ici", "emptyDefault": "Aucune tรขche", "dropHere": "Dรฉposer ici", - "showArchived": "Afficher les archivรฉes" + "showArchived": "Afficher les archivรฉes", + "addTaskAriaLabel": "Ajouter une nouvelle tรขche au backlog", + "closeTaskDetailsAriaLabel": "Fermer les dรฉtails de la tรขche", + "editTask": "Modifier la tรขche", + "cannotEditWhileRunning": "Impossible de modifier pendant l'exรฉcution" }, "execution": { "phases": { @@ -95,5 +108,22 @@ "retry": "Rรฉessayer", "selectFile": "Sรฉlectionnez un fichier pour voir son contenu", "openInIDE": "Ouvrir dans l'IDE" + }, + "metadata": { + "severity": "sรฉvรฉritรฉ" + }, + "images": { + "removeImageAriaLabel": "Supprimer l'image {{filename}}", + "pasteHint": "Astuce : Collez des captures d'รฉcran directement avec {{shortcut}} pour ajouter des images de rรฉfรฉrence." + }, + "notifications": { + "backgroundTaskTitle": "La tรขche continue en arriรจre-plan", + "backgroundTaskDescription": "La tรขche est toujours en cours. Vous pouvez rouvrir cette boรฎte de dialogue pour suivre la progression." + }, + "wizard": { + "gitOptions": { + "useWorktreeLabel": "Utiliser un espace de travail isolรฉ (recommandรฉ)", + "useWorktreeDescription": "Crรฉe les changements dans un worktree git sรฉparรฉ pour une rรฉvision sรฉcurisรฉe avant la fusion. Dรฉsactivez pour travailler directement dans votre projet (plus rapide mais risquรฉ)." + } } } diff --git a/apps/frontend/src/shared/i18n/locales/fr/terminal.json b/apps/frontend/src/shared/i18n/locales/fr/terminal.json new file mode 100644 index 0000000000..80867cde4d --- /dev/null +++ b/apps/frontend/src/shared/i18n/locales/fr/terminal.json @@ -0,0 +1,36 @@ +{ + "expand": { + "expand": "Agrandir le terminal", + "collapse": "Reduire le terminal" + }, + "auth": { + "terminalTitle": "Auth: {{profileName}}", + "maxTerminalsReached": "Impossible d'ouvrir le terminal d'auth: nombre maximum de terminaux atteint. Fermez un terminal d'abord." + }, + "worktree": { + "create": "Worktree", + "createNew": "Nouveau Worktree", + "existing": "Worktrees Existants", + "createTitle": "Creer un Worktree Terminal", + "createDescription": "Creer un espace de travail isole pour ce terminal. 
Tout le travail se fera dans le repertoire du worktree.", + "name": "Nom du Worktree", + "namePlaceholder": "ma-fonctionnalite", + "nameRequired": "Le nom du worktree est requis", + "nameInvalid": "Le nom doit commencer et se terminer par une lettre ou un chiffre", + "nameHelp": "Lettres minuscules, chiffres, tirets et underscores uniquement", + "associateTask": "Lier a une Tache", + "selectTask": "Selectionner une tache...", + "noTask": "Pas de tache (worktree autonome)", + "createBranch": "Creer une Branche Git", + "branchHelp": "Cree la branche: {{branch}}", + "baseBranch": "Branche de Base", + "selectBaseBranch": "Selectionner la branche de base...", + "useProjectDefault": "Utiliser la valeur par defaut du projet ({{branch}})", + "baseBranchHelp": "La branche a partir de laquelle creer le worktree", + "openInIDE": "Ouvrir dans IDE", + "maxReached": "Maximum de 12 worktrees terminal atteint", + "alreadyExists": "Un worktree avec ce nom existe deja", + "deleteTitle": "Supprimer le Worktree?", + "deleteDescription": "Ceci supprimera definitivement le worktree et sa branche. Les modifications non committรฉes seront perdues." + } +} diff --git a/apps/frontend/src/shared/i18n/locales/fr/welcome.json b/apps/frontend/src/shared/i18n/locales/fr/welcome.json index 2341f21fb3..8b0c9ed1db 100644 --- a/apps/frontend/src/shared/i18n/locales/fr/welcome.json +++ b/apps/frontend/src/shared/i18n/locales/fr/welcome.json @@ -11,6 +11,7 @@ "title": "Projets rรฉcents", "empty": "Aucun projet", "emptyDescription": "Crรฉez un nouveau projet ou ouvrez-en un existant pour commencer", - "openFolder": "Ouvrir le dossier" + "openFolder": "Ouvrir le dossier", + "openProjectAriaLabel": "Ouvrir le projet {{name}}" } } diff --git a/apps/frontend/src/shared/types/cli.ts b/apps/frontend/src/shared/types/cli.ts index 92f7645212..a104cda36d 100644 --- a/apps/frontend/src/shared/types/cli.ts +++ b/apps/frontend/src/shared/types/cli.ts @@ -17,6 +17,7 @@ export interface ToolDetectionResult { | 'user-config' | 'venv' | 'homebrew' + | 'nvm' | 'system-path' | 'bundled' | 'fallback'; diff --git a/apps/frontend/src/shared/types/ipc.ts b/apps/frontend/src/shared/types/ipc.ts index ccbee86f3e..25fb4d30ea 100644 --- a/apps/frontend/src/shared/types/ipc.ts +++ b/apps/frontend/src/shared/types/ipc.ts @@ -50,7 +50,10 @@ import type { SessionDateRestoreResult, RateLimitInfo, SDKRateLimitInfo, - RetryWithProfileRequest + RetryWithProfileRequest, + CreateTerminalWorktreeRequest, + TerminalWorktreeConfig, + TerminalWorktreeResult, } from './terminal'; import type { ClaudeProfileSettings, @@ -59,7 +62,7 @@ import type { ClaudeAuthResult, ClaudeUsageSnapshot } from './agent'; -import type { AppSettings, SourceEnvConfig, SourceEnvCheckResult, AutoBuildSourceUpdateCheck, AutoBuildSourceUpdateProgress } from './settings'; +import type { AppSettings, SourceEnvConfig, SourceEnvCheckResult } from './settings'; import type { AppUpdateInfo, AppUpdateProgress, AppUpdateAvailableEvent, AppUpdateDownloadedEvent } from './app-update'; import type { ChangelogTask, @@ -123,6 +126,7 @@ import type { GitLabMRReviewProgress, GitLabNewCommitsCheck } from './integrations'; +import type { APIProfile, ProfilesFile, TestConnectionResult, DiscoverModelsResult } from './profile'; // Electron API exposed via contextBridge // Tab state interface (persisted in main process) @@ -187,18 +191,26 @@ export interface ElectronAPI { resizeTerminal: (id: string, cols: number, rows: number) => void; invokeClaudeInTerminal: (id: string, cwd?: string) => void; 
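For reference, a minimal sketch of how a renderer component might consume the new `terminal` i18n namespace registered above (the component name is hypothetical and it assumes the app's existing react-i18next setup; only the namespace and key come from the locale files in this diff):

import React from 'react';
import { useTranslation } from 'react-i18next';

// Hypothetical dialog title that reads keys from locales/{en,fr}/terminal.json
export function WorktreeDialogTitle() {
  // Load the "terminal" namespace added to the i18n resources/ns list above
  const { t } = useTranslation('terminal');
  // Renders "Create Terminal Worktree" (en) or its French translation (fr)
  return <h2>{t('worktree.createTitle')}</h2>;
}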
generateTerminalName: (command: string, cwd?: string) => Promise>; + setTerminalTitle: (id: string, title: string) => void; + setTerminalWorktreeConfig: (id: string, config: TerminalWorktreeConfig | undefined) => void; // Terminal session management (persistence/restore) getTerminalSessions: (projectPath: string) => Promise>; restoreTerminalSession: (session: TerminalSession, cols?: number, rows?: number) => Promise>; clearTerminalSessions: (projectPath: string) => Promise; resumeClaudeInTerminal: (id: string, sessionId?: string) => void; + activateDeferredClaudeResume: (id: string) => void; getTerminalSessionDates: (projectPath?: string) => Promise>; getTerminalSessionsForDate: (date: string, projectPath: string) => Promise>; restoreTerminalSessionsFromDate: (date: string, projectPath: string, cols?: number, rows?: number) => Promise>; saveTerminalBuffer: (terminalId: string, serialized: string) => Promise; checkTerminalPtyAlive: (terminalId: string) => Promise>; + // Terminal worktree operations (isolated development) + createTerminalWorktree: (request: CreateTerminalWorktreeRequest) => Promise; + listTerminalWorktrees: (projectPath: string) => Promise>; + removeTerminalWorktree: (projectPath: string, name: string, deleteBranch?: boolean) => Promise; + // Terminal event listeners onTerminalOutput: (callback: (id: string, data: string) => void) => () => void; onTerminalExit: (callback: (id: string, exitCode: number) => void) => () => void; @@ -214,6 +226,16 @@ export interface ElectronAPI { message?: string; detectedAt: string }) => void) => () => void; + /** Listen for auth terminal creation - allows UI to display the OAuth terminal */ + onTerminalAuthCreated: (callback: (info: { + terminalId: string; + profileId: string; + profileName: string + }) => void) => () => void; + /** Listen for Claude busy state changes (for visual indicator: red=busy, green=idle) */ + onTerminalClaudeBusy: (callback: (id: string, isBusy: boolean) => void) => () => void; + /** Listen for pending Claude resume notifications (for deferred resume on tab activation) */ + onTerminalPendingResume: (callback: (id: string, sessionId?: string) => void) => () => void; // Claude profile management (multi-account support) getClaudeProfiles: () => Promise>; @@ -256,6 +278,12 @@ export interface ElectronAPI { // App settings getSettings: () => Promise>; saveSettings: (settings: Partial) => Promise; + + // Sentry error reporting + notifySentryStateChanged: (enabled: boolean) => void; + getSentryDsn: () => Promise; + getSentryConfig: () => Promise<{ dsn: string; tracesSampleRate: number; profilesSampleRate: number }>; + getCliToolsInfo: () => Promise>; + // API Profile management (custom Anthropic-compatible endpoints) + getAPIProfiles: () => Promise>; + saveAPIProfile: (profile: Omit) => Promise>; + updateAPIProfile: (profile: APIProfile) => Promise>; + deleteAPIProfile: (profileId: string) => Promise; + setActiveAPIProfile: (profileId: string | null) => Promise; + // Note: AbortSignal is handled in preload via separate cancel IPC channels, not passed through IPC + testConnection: (baseUrl: string, apiKey: string, signal?: AbortSignal) => Promise>; + discoverModels: (baseUrl: string, apiKey: string, signal?: AbortSignal) => Promise>; + // Dialog operations selectDirectory: () => Promise; createProjectFolder: (location: string, name: string, initGit: boolean) => Promise>; @@ -542,19 +580,10 @@ export interface ElectronAPI { callback: (projectId: string, ideationType: string) => void ) => () => void; - // Auto Claude source 
update operations - checkAutoBuildSourceUpdate: () => Promise>; - downloadAutoBuildSourceUpdate: () => void; - getAutoBuildSourceVersion: () => Promise>; - - // Auto Claude source update event listeners - onAutoBuildSourceUpdateProgress: ( - callback: (progress: AutoBuildSourceUpdateProgress) => void - ) => () => void; - // Electron app update operations checkAppUpdate: () => Promise>; downloadAppUpdate: () => Promise; + downloadStableUpdate: () => Promise; installAppUpdate: () => void; // Electron app update event listeners @@ -567,6 +596,9 @@ export interface ElectronAPI { onAppUpdateProgress: ( callback: (progress: AppUpdateProgress) => void ) => () => void; + onAppUpdateStableDowngrade: ( + callback: (info: AppUpdateInfo) => void + ) => () => void; // Shell operations openExternal: (url: string) => Promise; diff --git a/apps/frontend/src/shared/types/profile.ts b/apps/frontend/src/shared/types/profile.ts new file mode 100644 index 0000000000..04c9e03b4b --- /dev/null +++ b/apps/frontend/src/shared/types/profile.ts @@ -0,0 +1,92 @@ +/** + * API Profile Management Types + * + * Users can configure custom Anthropic-compatible API endpoints with profiles. + * Each profile contains name, base URL, API key, and optional model mappings. + * + * NOTE: These types are intentionally duplicated from libs/profile-service/src/types/profile.ts + * because the frontend build (Electron + Vite) doesn't consume the workspace library types directly. + * Keep these definitions in sync with the library types when making changes. + */ + +/** + * API Profile - represents a custom API endpoint configuration + * IMPORTANT: Named APIProfile (not Profile) to avoid conflicts with user profiles + */ +export interface APIProfile { + id: string; // UUID v4 + name: string; // User-friendly name + baseUrl: string; // API endpoint URL (e.g., https://api.anthropic.com) + apiKey: string; // Full API key (never display in UI - use maskApiKey()) + models?: { + // OPTIONAL - only specify models to override + default?: string; // Maps to ANTHROPIC_MODEL + haiku?: string; // Maps to ANTHROPIC_DEFAULT_HAIKU_MODEL + sonnet?: string; // Maps to ANTHROPIC_DEFAULT_SONNET_MODEL + opus?: string; // Maps to ANTHROPIC_DEFAULT_OPUS_MODEL + }; + createdAt: number; // Unix timestamp (ms) + updatedAt: number; // Unix timestamp (ms) +} + +/** + * Profile file structure - stored in profiles.json + */ +export interface ProfilesFile { + profiles: APIProfile[]; + activeProfileId: string | null; + version: number; +} + +/** + * Form data type for creating/editing profiles (without id, models optional) + */ +export interface ProfileFormData { + name: string; + baseUrl: string; + apiKey: string; + models?: { + default?: string; + haiku?: string; + sonnet?: string; + opus?: string; + }; +} + +/** + * Shared error type for connection-related errors + * Used by both TestConnectionResult and DiscoverModelsError + */ +export type ConnectionErrorType = 'auth' | 'network' | 'endpoint' | 'timeout' | 'not_supported' | 'unknown'; + +/** + * Test connection result - returned by profile:test-connection + */ +export interface TestConnectionResult { + success: boolean; + errorType?: ConnectionErrorType; + message: string; +} + +/** + * Model information from /v1/models endpoint + */ +export interface ModelInfo { + id: string; // Model ID (e.g., "claude-sonnet-4-20250514") + display_name: string; // Human-readable name (e.g., "Claude Sonnet 4") +} + +/** + * Result from discoverModels operation + */ +export interface DiscoverModelsResult { + models: 
ModelInfo[]; +} + +/** + * Error from discoverModels operation + */ +export interface DiscoverModelsError { + errorType: ConnectionErrorType; + message: string; +} diff --git a/apps/frontend/src/shared/types/project.ts b/apps/frontend/src/shared/types/project.ts index 2e2e4b0c31..5ac23e847d 100644 --- a/apps/frontend/src/shared/types/project.ts +++ b/apps/frontend/src/shared/types/project.ts @@ -246,13 +246,29 @@ export interface GraphitiMemoryState { error_log: Array<{ timestamp: string; error: string }>; } +export type MemoryType = + | 'session_insight' + | 'codebase_discovery' + | 'codebase_map' + | 'pattern' + | 'gotcha' + | 'task_outcome' + | 'pr_review' + | 'pr_finding' + | 'pr_pattern' + | 'pr_gotcha'; + export interface MemoryEpisode { id: string; - type: 'session_insight' | 'codebase_discovery' | 'codebase_map' | 'pattern' | 'gotcha' | 'task_outcome'; + type: MemoryType; timestamp: string; content: string; session_number?: number; score?: number; + // For PR reviews - extracted from content for quick access + prNumber?: number; + repo?: string; + verdict?: 'approve' | 'request_changes' | 'comment'; } export interface ContextSearchResult { diff --git a/apps/frontend/src/shared/types/settings.ts b/apps/frontend/src/shared/types/settings.ts index 0c9e97846f..5f41c78e24 100644 --- a/apps/frontend/src/shared/types/settings.ts +++ b/apps/frontend/src/shared/types/settings.ts @@ -201,17 +201,19 @@ export interface FeatureThinkingConfig { } // Agent profile for preset model/thinking configurations +// All profiles have per-phase configuration (phaseModels/phaseThinking) export interface AgentProfile { id: string; name: string; description: string; - model: ModelTypeShort; - thinkingLevel: ThinkingLevel; - icon?: string; // Lucide icon name - // Auto profile specific - per-phase configuration - isAutoProfile?: boolean; + model: ModelTypeShort; // Primary model (shown in profile card) + thinkingLevel: ThinkingLevel; // Primary thinking level (shown in profile card) + icon?: string; // Lucide icon name + // Per-phase configuration - all profiles now have this phaseModels?: PhaseModelConfig; phaseThinking?: PhaseThinkingConfig; + /** @deprecated Use phaseModels and phaseThinking for per-phase configuration. Will be removed in v3.0. 
*/ + isAutoProfile?: boolean; } export interface AppSettings { @@ -246,6 +248,9 @@ export interface AppSettings { memoryAzureApiKey?: string; memoryAzureBaseUrl?: string; memoryAzureEmbeddingDeployment?: string; + // Agent Memory Access (MCP) - app-wide defaults + graphitiMcpEnabled?: boolean; + graphitiMcpUrl?: string; // Onboarding wizard completion state onboardingCompleted?: boolean; // Selected agent profile for preset model/thinking configurations @@ -274,6 +279,8 @@ export interface AppSettings { customIDEPath?: string; // For 'custom' IDE preferredTerminal?: SupportedTerminal; customTerminalPath?: string; // For 'custom' terminal + // Anonymous error reporting (Sentry) - enabled by default to help improve the app + sentryEnabled?: boolean; } // Auto-Claude Source Environment Configuration (for auto-claude repo .env) @@ -292,27 +299,3 @@ export interface SourceEnvCheckResult { sourcePath?: string; error?: string; } - -// Auto Claude Source Update Types -export interface AutoBuildSourceUpdateCheck { - updateAvailable: boolean; - currentVersion: string; - latestVersion?: string; - releaseNotes?: string; - releaseUrl?: string; - error?: string; -} - -export interface AutoBuildSourceUpdateResult { - success: boolean; - version?: string; - error?: string; -} - -export interface AutoBuildSourceUpdateProgress { - stage: 'checking' | 'downloading' | 'extracting' | 'complete' | 'error'; - percent?: number; - message: string; - /** New version after successful update - used to refresh UI */ - newVersion?: string; -} diff --git a/apps/frontend/src/shared/types/task.ts b/apps/frontend/src/shared/types/task.ts index 833516bc7a..68cc335757 100644 --- a/apps/frontend/src/shared/types/task.ts +++ b/apps/frontend/src/shared/types/task.ts @@ -228,6 +228,7 @@ export interface TaskMetadata { // Git/Worktree configuration baseBranch?: string; // Override base branch for this task's worktree + useWorktree?: boolean; // If false, use direct mode (no worktree isolation) - default is true for safety // Archive status archivedAt?: string; // ISO date when task was archived diff --git a/apps/frontend/src/shared/types/terminal.ts b/apps/frontend/src/shared/types/terminal.ts index 6fb21d665a..fc7eb93033 100644 --- a/apps/frontend/src/shared/types/terminal.ts +++ b/apps/frontend/src/shared/types/terminal.ts @@ -29,6 +29,8 @@ export interface TerminalSession { outputBuffer: string; createdAt: string; lastActiveAt: string; + /** Associated worktree configuration (validated on restore) */ + worktreeConfig?: TerminalWorktreeConfig; } export interface TerminalRestoreResult { @@ -132,3 +134,57 @@ export interface RetryWithProfileRequest { /** Profile ID to retry with */ profileId: string; } + +// ============================================================================ +// Terminal Worktree Types +// ============================================================================ + +/** + * Configuration for a terminal-associated git worktree + * Enables isolated development environments for each terminal session + */ +export interface TerminalWorktreeConfig { + /** Unique worktree name (used as directory name) */ + name: string; + /** Path to the worktree directory (.auto-claude/worktrees/terminal/{name}/) */ + worktreePath: string; + /** Git branch name (terminal/{name}) - empty if no branch created */ + branchName: string; + /** Base branch the worktree was created from (from project settings or auto-detected) */ + baseBranch: string; + /** Whether a git branch was created for this worktree */ + hasGitBranch: 
boolean; + /** Associated task ID (optional - for task-linked worktrees) */ + taskId?: string; + /** When the worktree was created */ + createdAt: string; + /** Terminal ID this worktree is associated with */ + terminalId: string; +} + +/** + * Request to create a terminal worktree + */ +export interface CreateTerminalWorktreeRequest { + /** Terminal ID to associate with */ + terminalId: string; + /** Worktree name (alphanumeric, dashes, underscores only) */ + name: string; + /** Optional task ID to link */ + taskId?: string; + /** Whether to create a git branch (terminal/{name}) */ + createGitBranch: boolean; + /** Project path where the worktree will be created */ + projectPath: string; + /** Optional base branch to create worktree from (defaults to project default) */ + baseBranch?: string; +} + +/** + * Result of terminal worktree creation + */ +export interface TerminalWorktreeResult { + success: boolean; + config?: TerminalWorktreeConfig; + error?: string; +}
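As a quick illustration of how the request fields above fit together, here is a minimal sketch of a `CreateTerminalWorktreeRequest` literal; the IDs, worktree name, and project path are hypothetical values, and the import path depends on where the caller lives relative to `shared/types/terminal.ts`:

```typescript
import type { CreateTerminalWorktreeRequest } from '../shared/types/terminal'; // hypothetical relative path

// Per the types above, this would create .auto-claude/worktrees/terminal/bugfix-session/
// and, because createGitBranch is true, a branch named terminal/bugfix-session.
const request: CreateTerminalWorktreeRequest = {
  terminalId: 'terminal-42',       // hypothetical terminal session ID
  name: 'bugfix-session',          // alphanumeric, dashes, underscores only
  createGitBranch: true,
  projectPath: '/path/to/project', // hypothetical project location
  baseBranch: 'develop'            // optional; the project default is used when omitted
};
```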
diff --git a/apps/frontend/src/shared/utils/sentry-privacy.ts b/apps/frontend/src/shared/utils/sentry-privacy.ts new file mode 100644 index 0000000000..56be7ead84 --- /dev/null +++ b/apps/frontend/src/shared/utils/sentry-privacy.ts @@ -0,0 +1,210 @@ +/** + * Shared Sentry Privacy Utilities + * + * Provides path masking functions for both main and renderer processes + * to ensure user privacy in error reports. + * + * Privacy approach: + * - Usernames are masked from all file paths + * - Project paths remain visible (this is expected for debugging) + * - All event fields are processed: stack traces, breadcrumbs, messages, + * tags, contexts, extra data, and user info + */ + +// Using a generic event type to work with both main and renderer Sentry SDKs +// The actual type is Sentry.ErrorEvent but we define a compatible interface +// to avoid importing @sentry/electron which has different exports per process +export interface SentryErrorEvent { + exception?: { + values?: Array<{ + stacktrace?: { + frames?: Array<{ + filename?: string; + abs_path?: string; + }>; + }; + value?: string; + }>; + }; + breadcrumbs?: Array<{ + message?: string; + data?: Record<string, unknown>; + }>; + message?: string; + tags?: Record<string, string>; + contexts?: Record<string, Record<string, unknown> | null>; + extra?: Record<string, unknown>; + user?: Record<string, unknown>; + request?: { + url?: string; + headers?: Record<string, string>; + data?: unknown; + }; +} + +/** + * Mask user-specific paths for privacy + * + * Replaces usernames in common OS path patterns: + * - macOS: /Users/username/... becomes /Users/.../ + * - Windows: C:\Users\username\... becomes C:\Users\...\ + * - Linux: /home/username/... becomes /home/.../ + * + * Note: Project paths remain visible for debugging purposes. + * This is intentional - we need to know which file caused the error. + */ +export function maskUserPaths(text: string): string { + if (!text) return text; + + // macOS: /Users/username/... or /Users/username (at end of string) + // Uses lookahead to match with or without trailing slash + text = text.replace(/\/Users\/[^/]+(?=\/|$)/g, '/Users/***'); + + // Windows: C:\Users\username\... or C:\Users\username (at end of string) + // Uses lookahead to match with or without trailing backslash + text = text.replace(/[A-Z]:\\Users\\[^\\]+(?=\\|$)/gi, (match: string) => { + const drive = match[0]; + return `${drive}:\\Users\\***`; + }); + + // Linux: /home/username/... or /home/username (at end of string) + // Uses lookahead to match with or without trailing slash + text = text.replace(/\/home\/[^/]+(?=\/|$)/g, '/home/***'); + + return text; +} + +/** + * Recursively mask paths in an object + * Handles nested objects and arrays + */ +function maskObjectPaths(obj: unknown): unknown { + if (obj === null || obj === undefined) { + return obj; + } + + if (typeof obj === 'string') { + return maskUserPaths(obj); + } + + if (Array.isArray(obj)) { + return obj.map(maskObjectPaths); + } + + if (typeof obj === 'object') { + const result: Record<string, unknown> = {}; + for (const key of Object.keys(obj as Record<string, unknown>)) { + result[key] = maskObjectPaths((obj as Record<string, unknown>)[key]); + } + return result; + } + + return obj; +} + +/** + * Process Sentry event to mask sensitive paths + * + * Comprehensive masking covers: + * - Exception stack traces (filename, abs_path) + * - Exception values (error messages) + * - Breadcrumbs (messages and data) + * - Top-level message + * - Tags (custom tags might contain paths) + * - Contexts (additional context data) + * - Extra data (arbitrary data attached to events) + * - User info (cleared entirely for privacy) + * - Request data (URLs, headers) + */ +export function processEvent<T extends SentryErrorEvent>(event: T): T { + // Mask paths in exception stack traces + if (event.exception?.values) { + for (const exception of event.exception.values) { + if (exception.stacktrace?.frames) { + for (const frame of exception.stacktrace.frames) { + if (frame.filename) { + frame.filename = maskUserPaths(frame.filename); + } + if (frame.abs_path) { + frame.abs_path = maskUserPaths(frame.abs_path); + } + } + } + if (exception.value) { + exception.value = maskUserPaths(exception.value); + } + } + } + + // Mask paths in breadcrumbs + if (event.breadcrumbs) { + for (const breadcrumb of event.breadcrumbs) { + if (breadcrumb.message) { + breadcrumb.message = maskUserPaths(breadcrumb.message); + } + if (breadcrumb.data) { + breadcrumb.data = maskObjectPaths(breadcrumb.data) as Record<string, unknown>; + } + } + } + + // Mask paths in message + if (event.message) { + event.message = maskUserPaths(event.message); + } + + // Mask paths in tags + if (event.tags) { + for (const key of Object.keys(event.tags)) { + if (typeof event.tags[key] === 'string') { + event.tags[key] = maskUserPaths(event.tags[key]); + } + } + } + + // Mask paths in contexts (recursively) + if (event.contexts) { + for (const contextKey of Object.keys(event.contexts)) { + const context = event.contexts[contextKey]; + if (context && typeof context === 'object') { + event.contexts[contextKey] = maskObjectPaths(context) as Record<string, unknown>; + } + } + } + + // Mask paths in extra data (recursively) + if (event.extra) { + event.extra = maskObjectPaths(event.extra) as Record<string, unknown>; + } + + // Clear user info entirely for privacy + // We don't collect any user identifiers + if (event.user) { + event.user = {}; + } + + // Mask paths in request data + if (event.request) { + if (event.request.url) { + event.request.url = maskUserPaths(event.request.url); + } + if (event.request.headers) { + for (const key of Object.keys(event.request.headers)) { + if (typeof event.request.headers[key] === 'string') { + event.request.headers[key] = maskUserPaths(event.request.headers[key]); + } + } + } + if (event.request.data) { + event.request.data = maskObjectPaths(event.request.data); + } + } + + return event; +} + +/** + * Production trace sample rate + * 10% of transactions are sampled for performance monitoring + */ +export const PRODUCTION_TRACE_SAMPLE_RATE = 0.1;
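To make the masking behavior concrete, here is a small usage sketch; the username, file paths, and relative import path are hypothetical, and the expected results follow from the regexes and the user-clearing logic above:

```typescript
import { maskUserPaths, processEvent } from './sentry-privacy'; // hypothetical relative path

// Usernames are masked while the project-relative part of the path stays visible.
maskUserPaths('/Users/jane/dev/auto-claude/apps/frontend/src/main.ts');
// -> '/Users/***/dev/auto-claude/apps/frontend/src/main.ts'

// processEvent scrubs a whole event (for example, from Sentry's beforeSend hook)
// and returns it with paths masked and user info cleared.
const scrubbed = processEvent({
  message: "ENOENT: no such file or directory '/home/jane/project/config.json'",
  user: { username: 'jane' }
});
// scrubbed.message -> "ENOENT: no such file or directory '/home/***/project/config.json'"
// scrubbed.user    -> {}
```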
diff --git a/apps/frontend/src/types/sentry-electron.d.ts b/apps/frontend/src/types/sentry-electron.d.ts new file mode 100644 index 0000000000..12d62fa0f4 --- /dev/null +++ b/apps/frontend/src/types/sentry-electron.d.ts @@ -0,0 +1,32 @@ +interface SentryErrorEvent { + [key: string]: unknown; +} + +interface SentryScope { + setContext: (key: string, value: Record<string, unknown>) => void; +} + +interface SentryInitOptions { + beforeSend?: (event: SentryErrorEvent) => SentryErrorEvent | null; + tracesSampleRate?: number; + profilesSampleRate?: number; + dsn?: string; + environment?: string; + release?: string; + debug?: boolean; + enabled?: boolean; +} + +declare module '@sentry/electron/main' { + export type ErrorEvent = SentryErrorEvent; + export function init(options: SentryInitOptions): void; + export function captureException(error: Error): void; + export function withScope(callback: (scope: SentryScope) => void): void; +} + +declare module '@sentry/electron/renderer' { + export type ErrorEvent = SentryErrorEvent; + export function init(options: SentryInitOptions): void; + export function captureException(error: Error): void; + export function withScope(callback: (scope: SentryScope) => void): void; +} diff --git a/apps/frontend/vitest.config.ts b/apps/frontend/vitest.config.ts index 6eb2f5ee49..199ca6efc4 100644 --- a/apps/frontend/vitest.config.ts +++ b/apps/frontend/vitest.config.ts @@ -15,7 +15,9 @@ export default defineConfig({ }, // Mock Electron modules for unit tests alias: { - electron: resolve(__dirname, 'src/__mocks__/electron.ts') + electron: resolve(__dirname, 'src/__mocks__/electron.ts'), + '@sentry/electron/main': resolve(__dirname, 'src/__mocks__/sentry-electron-main.ts'), + '@sentry/electron/renderer': resolve(__dirname, 'src/__mocks__/sentry-electron-renderer.ts') }, // Setup files for test environment setupFiles: ['src/__tests__/setup.ts'] diff --git a/guides/CLI-USAGE.md b/guides/CLI-USAGE.md index 13008a87bc..dd41f507b1 100644 --- a/guides/CLI-USAGE.md +++ b/guides/CLI-USAGE.md @@ -74,16 +74,16 @@ All commands below should be run from the `apps/backend/` directory: source .venv/bin/activate # Create a spec interactively -python spec_runner.py --interactive +python runners/spec_runner.py --interactive # Or with a task description -python spec_runner.py --task "Add user authentication with OAuth" +python runners/spec_runner.py --task "Add user authentication with OAuth" # Force a specific complexity level -python spec_runner.py --task "Fix button color" --complexity simple +python runners/spec_runner.py --task "Fix button color" --complexity simple # Continue an interrupted spec -python spec_runner.py --continue 001-feature +python runners/spec_runner.py --continue 001-feature ``` ### Complexity Tiers @@ -182,7 +182,35 @@ python validate_spec.py --spec-dir specs/001-feature --checkpoint all ## Environment Variables +Copy `.env.example` to `.env` and configure as needed: + +```bash +cp .env.example .env +``` + +### Core Settings + | Variable | Required | Description | |----------|----------|-------------| | `CLAUDE_CODE_OAUTH_TOKEN` | Yes | OAuth token from `claude setup-token` | | `AUTO_BUILD_MODEL` | No | Model override (default: claude-opus-4-5-20251101) | +| `DEFAULT_BRANCH` | No | Base branch for worktrees (auto-detects main/master) | +| `DEBUG` | No | Enable debug logging (default: false) | + +### Integrations + +| Variable | Required | Description | +|----------|----------|-------------| +| `LINEAR_API_KEY` | No | Linear API key for task sync | +| `GITLAB_TOKEN` | No | GitLab Personal
Access Token | +| `GITLAB_INSTANCE_URL` | No | GitLab instance URL (defaults to gitlab.com) | + +### Memory Layer (Graphiti) + +| Variable | Required | Description | +|----------|----------|-------------| +| `GRAPHITI_ENABLED` | No | Enable Memory Layer (default: true) | +| `GRAPHITI_LLM_PROVIDER` | No | LLM provider: openai, anthropic, ollama, google, openrouter | +| `GRAPHITI_EMBEDDER_PROVIDER` | No | Embedder: openai, voyage, ollama, google, openrouter | + +See `.env.example` for complete configuration options including provider-specific settings. diff --git a/guides/README.md b/guides/README.md index 78a0baad2e..6046349f7c 100644 --- a/guides/README.md +++ b/guides/README.md @@ -7,6 +7,8 @@ Detailed documentation for Auto Claude setup and usage. | Guide | Description | |-------|-------------| | **[CLI-USAGE.md](CLI-USAGE.md)** | Terminal-only usage for power users, headless servers, and CI/CD | +| **[windows-development.md](windows-development.md)** | Windows-specific development guide (file encoding, paths, line endings) | +| **[linux.md](linux.md)** | Linux-specific installation and build guide (Flatpak, AppImage) | ## Quick Links diff --git a/guides/linux.md b/guides/linux.md new file mode 100644 index 0000000000..077179f573 --- /dev/null +++ b/guides/linux.md @@ -0,0 +1,95 @@ +# Linux Installation & Building Guide + +This guide covers Linux-specific installation options and building from source. + +## Flatpak Installation + +Flatpak packages are available for Linux users who prefer sandboxed applications. + +### Download Flatpak + +See the [main README](../README.md#beta-release) for Flatpak download links in the Beta Release section. + +### Building Flatpak from Source + +To build the Flatpak package yourself, you need additional dependencies: + +```bash +# Fedora/RHEL +sudo dnf install flatpak-builder + +# Ubuntu/Debian +sudo apt install flatpak-builder + +# Install required Flatpak runtimes +flatpak install flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08 +flatpak install flathub org.electronjs.Electron2.BaseApp//25.08 + +# Build the Flatpak +cd apps/frontend +npm run package:flatpak +``` + +The Flatpak will be created in `apps/frontend/dist/`. 
+ +### Installing the Built Flatpak + +After building, install the Flatpak locally: + +```bash +flatpak install --user apps/frontend/dist/Auto-Claude-*.flatpak +``` + +### Running from Flatpak + +```bash +flatpak run com.autoclaude.AutoClaude +``` + +## Other Linux Packages + +### AppImage + +AppImage files are portable and don't require installation: + +```bash +# Make executable +chmod +x Auto-Claude-*-linux-x86_64.AppImage + +# Run +./Auto-Claude-*-linux-x86_64.AppImage +``` + +### Debian Package (.deb) + +For Ubuntu/Debian systems: + +```bash +sudo dpkg -i Auto-Claude-*-linux-amd64.deb +``` + +## Troubleshooting + +### Flatpak Runtime Issues + +If you encounter runtime issues with Flatpak: + +```bash +# Update runtimes +flatpak update + +# Check for missing runtimes +flatpak list --runtime +``` + +### AppImage Not Starting + +If the AppImage doesn't start: + +```bash +# Check for missing libraries +ldd ./Auto-Claude-*-linux-x86_64.AppImage + +# Try running with debug output +./Auto-Claude-*-linux-x86_64.AppImage --verbose +``` diff --git a/guides/windows-development.md b/guides/windows-development.md new file mode 100644 index 0000000000..e054356b26 --- /dev/null +++ b/guides/windows-development.md @@ -0,0 +1,337 @@ +# Windows Development Guide + +This guide covers Windows-specific considerations when developing +Auto Claude. + +## File Encoding + +### Problem + +Windows Python defaults to the `cp1252` (Windows-1252) code page instead +of UTF-8. This causes encoding errors when reading/writing files with +non-ASCII characters. + +**Common Error:** + +```plaintext +UnicodeDecodeError: 'charmap' codec can't decode byte 0x8d in position 1234 +``` + +### Solution + +**Always specify `encoding="utf-8"` for all text file operations.** + +See [CONTRIBUTING.md - File Encoding](../CONTRIBUTING.md#file-encoding-python) +for detailed examples and patterns. + +### Testing on Windows + +To verify your code works on Windows: + +1. **Test with non-ASCII content:** + + ```python + # Include emoji, international chars in test data + test_data = {"message": "Test ๐Ÿš€ with รฑoรฑo and ไธญๆ–‡"} + ``` + +2. **Run pre-commit hooks:** + + ```bash + pre-commit run check-file-encoding --all-files + ``` + +3. **Run all tests:** + + ```bash + npm run test:backend + ``` + +### Common Pitfalls + +#### Pitfall 1: JSON files + +```python +# Wrong - no encoding +with open("config.json") as f: + data = json.load(f) + +# Correct +with open("config.json", encoding="utf-8") as f: + data = json.load(f) +``` + +#### Pitfall 2: Path methods + +```python +# Wrong +content = Path("README.md").read_text() + +# Correct +content = Path("README.md").read_text(encoding="utf-8") +``` + +#### Pitfall 3: Subprocess output + +```python +# Wrong +result = subprocess.run(cmd, capture_output=True, text=True) + +# Correct +result = subprocess.run(cmd, capture_output=True, encoding="utf-8") +``` + +## Line Endings + +### Problem + +Windows uses CRLF (`\r\n`) line endings while macOS/Linux use LF (`\n`). +This can cause git diffs to show every line as changed. + +### Solution + +1. **Configure git to handle line endings:** + + ```bash + git config --global core.autocrlf true + ``` + +2. **The project's `.gitattributes` handles this automatically:** + + ```plaintext + * text=auto + *.py text eol=lf + *.md text eol=lf + ``` + +3. 
**In code, normalize when processing:** + + ```python + # Normalize line endings to LF (idiomatic approach) + content = "\n".join(content.splitlines()) + ``` + +## Path Separators + +### Problem + +Windows uses backslash `\` for paths, while Unix uses `/`. +This can break path operations. + +### Solution + +1. **Always use `Path` from `pathlib`:** + + ```python + from pathlib import Path + + # Correct - works on all platforms + config_path = Path("config") / "settings.json" + + # Wrong - Unix only + config_path = "config/settings.json" + ``` + +2. **Use `os.path.join()` for strings:** + + ```python + import os + + # Correct + config_path = os.path.join("config", "settings.json") + ``` + +3. **Never hardcode separators:** + + ```python + # Wrong - Unix only + path = "apps/backend/core" + + # Correct + path = os.path.join("apps", "backend", "core") + # Or better + path = Path("apps") / "backend" / "core" + ``` + +## Shell Commands + +### Problem + +Windows doesn't have bash by default. Shell commands need to work across +platforms. + +### Solution + +1. **Use Python libraries instead of shell:** + + ```python + # Instead of shell commands + import shutil + shutil.copy("source.txt", "dest.txt") # Instead of cp + + import os + os.remove("file.txt") # Instead of rm + ``` + +2. **Use `shlex` for cross-platform commands:** + + ```python + import shlex + import subprocess + + cmd = shlex.split("git rev-parse HEAD") + result = subprocess.run(cmd, capture_output=True, encoding="utf-8") + ``` + +3. **Check platform when needed:** + + ```python + import sys + + if sys.platform == "win32": + # Windows-specific code + pass + else: + # Unix code + pass + ``` + +## Development Environment + +### Recommended Setup on Windows + +1. **Use WSL2 (Windows Subsystem for Linux)** - Recommended: + - Most consistent with production Linux environment + - Full bash support + - Better performance for file I/O + - Install from Microsoft Store or: `wsl --install` + +2. **Or use Git Bash:** + - Comes with Git for Windows + - Provides Unix-like shell + - Lighter than WSL + - Download from [gitforwindows.org](https://gitforwindows.org/) + +3. **Or use PowerShell with Python:** + - Native Windows environment + - Requires extra care with paths/encoding + - Built into Windows + +### Editor Configuration + +**VS Code settings for Windows (`settings.json`):** + +```json +{ + "files.encoding": "utf8", + "files.eol": "\n", + "python.analysis.typeCheckingMode": "basic", + "editor.formatOnSave": true +} +``` + +## Common Issues and Solutions + +### Issue: Permission errors when deleting files + +**Problem:** Windows file locking is stricter than Unix. + +**Solution:** Ensure files are properly closed using context managers: + +```python +# Use context managers +with open(path, encoding="utf-8") as f: + data = f.read() +# File is closed here - safe to delete +``` + +### Issue: Long path names + +**Problem:** Windows has a 260-character path limit (legacy). + +**Solution:** + +1. Enable long paths in Windows 10+ (Group Policy or Registry) +2. Or keep paths short +3. Or use WSL2 + +### Issue: Case-insensitive filesystem + +**Problem:** Windows filesystem is case-insensitive +(`File.txt` == `file.txt`). 
+ +**Solution:** Be consistent with casing in filenames and imports: + +```python +# Consistent casing +from apps.backend.core import Client # File: client.py + +# Avoid mixing cases +from apps.backend.core import client # Could work on Windows but fail on Linux +``` + +## Testing Windows Compatibility + +### Before Submitting a PR + +1. **Run pre-commit hooks:** + + ```bash + pre-commit run --all-files + ``` + +2. **Run all tests:** + + ```bash + npm run test:backend + npm test # frontend tests + ``` + +3. **Test with special characters:** + + ```python + # Add test data with emoji, international chars + test_content = "Test 🚀 ñoño 中文 العربية" + ``` + +### Windows-Specific Test Cases + +Add tests for Windows compatibility when relevant: + +```python +import sys +from pathlib import Path + +import pytest + +@pytest.mark.skipif(sys.platform != "win32", reason="Windows only") +def test_windows_encoding(): + """Test Windows encoding with special characters.""" + content = "Test 🚀 ñoño 中文" + Path("test.txt").write_text(content, encoding="utf-8") + loaded = Path("test.txt").read_text(encoding="utf-8") + assert loaded == content +``` + +## Getting Help + +If you encounter Windows-specific issues: + +1. Check this guide and [CONTRIBUTING.md](../CONTRIBUTING.md) +2. Search [existing issues](https://github.com/AndyMik90/Auto-Claude/issues) +3. Ask in [discussions](https://github.com/AndyMik90/Auto-Claude/discussions) +4. Create an issue with `[Windows]` tag + +## Resources + +- [Python on Windows](https://docs.python.org/3/using/windows.html) +- [pathlib Documentation](https://docs.python.org/3/library/pathlib.html) +- [Git for Windows](https://gitforwindows.org/) +- [WSL2 Documentation](https://docs.microsoft.com/en-us/windows/wsl/) + +## Related + +- [CONTRIBUTING.md](../CONTRIBUTING.md) - General contribution + guidelines +- [PR #782](https://github.com/AndyMik90/Auto-Claude/pull/782) - + Comprehensive UTF-8 encoding fix +- [PR #795](https://github.com/AndyMik90/Auto-Claude/pull/795) - + Pre-commit hooks for encoding enforcement diff --git a/implementation_plan.json b/implementation_plan.json index d44f4f68c6..ae64097a07 100644 --- a/implementation_plan.json +++ b/implementation_plan.json @@ -1,24 +1,30 @@ { - "spec_id": "011-fix-scale-adjustment-and-view-reload-issues", + "spec_id": "025-improving-task-card-title-readability", "subtasks": [ { "id": "1", - "title": "Fix slider to defer view reload until drag ends and add zoom button functionality", + "title": "Restructure TaskCard header: Remove flex wrapper around title, make title standalone with full width", + "status": "completed" + }, + { + "id": "2", + "title": "Relocate status badges from header to metadata section", + "status": "completed" + }, + { + "id": "3", + "title": "Add localization for security severity badge label", "status": "completed" } ], "qa_signoff": { "status": "fixes_applied", - "timestamp": "2025-12-27T02:20:00Z", - "fix_session": 0, + "timestamp": "2026-01-01T11:58:40Z", + "fix_session": 1, "issues_fixed": [ { - "title": "Remove preview hint textbox", - "fix_commit": "2653019" - }, - { - "title": "Remove unused preview translation keys", - "fix_commit": "e17536c" + "title": "Missing localization for hardcoded 'severity' string in TaskCard", + "fix_commit": "de0c8e4" } ], "ready_for_qa_revalidation": true diff --git a/package-lock.json b/package-lock.json index 47bc7c621c..842c9c6a46 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,17 +1,17527 @@ { "name": "auto-claude", - "version": "2.7.2-beta.10", +
"version": "2.7.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "auto-claude", - "version": "2.7.2-beta.10", + "version": "2.7.2", "license": "AGPL-3.0", + "workspaces": [ + "apps/*", + "libs/*" + ], + "devDependencies": { + "jsdom": "^27.4.0" + }, "engines": { "node": ">=24.0.0", "npm": ">=10.0.0" } + }, + "apps/frontend": { + "name": "auto-claude-ui", + "version": "2.7.2", + "hasInstallScript": true, + "license": "AGPL-3.0", + "dependencies": { + "@anthropic-ai/sdk": "^0.71.2", + "@dnd-kit/core": "^6.3.1", + "@dnd-kit/sortable": "^10.0.0", + "@dnd-kit/utilities": "^3.2.2", + "@lydell/node-pty": "^1.1.0", + "@radix-ui/react-alert-dialog": "^1.1.15", + "@radix-ui/react-checkbox": "^1.1.4", + "@radix-ui/react-collapsible": "^1.1.3", + "@radix-ui/react-dialog": "^1.1.15", + "@radix-ui/react-dropdown-menu": "^2.1.16", + "@radix-ui/react-popover": "^1.1.15", + "@radix-ui/react-progress": "^1.1.8", + "@radix-ui/react-radio-group": "^1.3.8", + "@radix-ui/react-scroll-area": "^1.2.10", + "@radix-ui/react-select": "^2.2.6", + "@radix-ui/react-separator": "^1.1.8", + "@radix-ui/react-slot": "^1.2.4", + "@radix-ui/react-switch": "^1.2.6", + "@radix-ui/react-tabs": "^1.1.13", + "@radix-ui/react-toast": "^1.2.15", + "@radix-ui/react-tooltip": "^1.2.8", + "@sentry/electron": "^7.5.0", + "@tailwindcss/typography": "^0.5.19", + "@tanstack/react-virtual": "^3.13.13", + "@xterm/addon-fit": "^0.11.0", + "@xterm/addon-serialize": "^0.14.0", + "@xterm/addon-web-links": "^0.12.0", + "@xterm/addon-webgl": "^0.19.0", + "@xterm/xterm": "^6.0.0", + "chokidar": "^5.0.0", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "dotenv": "^16.6.1", + "electron-log": "^5.4.3", + "electron-updater": "^6.6.2", + "i18next": "^25.7.3", + "lucide-react": "^0.562.0", + "minimatch": "^10.1.1", + "motion": "^12.23.26", + "proper-lockfile": "^4.1.2", + "react": "^19.2.3", + "react-dom": "^19.2.3", + "react-i18next": "^16.5.0", + "react-markdown": "^10.1.0", + "react-resizable-panels": "^4.2.0", + "remark-gfm": "^4.0.1", + "semver": "^7.7.3", + "tailwind-merge": "^3.4.0", + "uuid": "^13.0.0", + "zod": "^4.2.1", + "zustand": "^5.0.9" + }, + "devDependencies": { + "@electron-toolkit/preload": "^3.0.2", + "@electron-toolkit/utils": "^4.0.0", + "@electron/rebuild": "^4.0.2", + "@eslint/js": "^9.39.1", + "@playwright/test": "^1.52.0", + "@tailwindcss/postcss": "^4.1.17", + "@testing-library/jest-dom": "^6.9.1", + "@testing-library/react": "^16.1.0", + "@types/minimatch": "^5.1.2", + "@types/node": "^25.0.0", + "@types/react": "^19.2.7", + "@types/react-dom": "^19.2.3", + "@types/semver": "^7.7.1", + "@types/uuid": "^10.0.0", + "@vitejs/plugin-react": "^5.1.2", + "autoprefixer": "^10.4.22", + "cross-env": "^10.1.0", + "electron": "39.2.7", + "electron-builder": "^26.0.12", + "electron-vite": "^5.0.0", + "eslint": "^9.39.1", + "eslint-plugin-react": "^7.37.5", + "eslint-plugin-react-hooks": "^7.0.1", + "globals": "^17.0.0", + "husky": "^9.1.7", + "jsdom": "^27.3.0", + "lint-staged": "^16.2.7", + "postcss": "^8.5.6", + "tailwindcss": "^4.1.17", + "typescript": "^5.9.3", + "typescript-eslint": "^8.50.1", + "vite": "^7.2.7", + "vitest": "^4.0.16" + }, + "engines": { + "node": ">=24.0.0", + "npm": ">=10.0.0" + } + }, + "apps/frontend/node_modules/@lydell/node-pty": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty/-/node-pty-1.1.0.tgz", + "integrity": "sha512-VDD8LtlMTOrPKWMXUAcB9+LTktzuunqrMwkYR1DMRBkS6LQrCt+0/Ws1o2rMml/n3guePpS7cxhHF7Nm5K4iMw==", + "license": "MIT", + 
"optionalDependencies": { + "@lydell/node-pty-darwin-arm64": "1.1.0", + "@lydell/node-pty-darwin-x64": "1.1.0", + "@lydell/node-pty-linux-arm64": "1.1.0", + "@lydell/node-pty-linux-x64": "1.1.0", + "@lydell/node-pty-win32-arm64": "1.1.0", + "@lydell/node-pty-win32-x64": "1.1.0" + } + }, + "apps/frontend/node_modules/@lydell/node-pty-darwin-arm64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty-darwin-arm64/-/node-pty-darwin-arm64-1.1.0.tgz", + "integrity": "sha512-7kFD+owAA61qmhJCtoMbqj3Uvff3YHDiU+4on5F2vQdcMI3MuwGi7dM6MkFG/yuzpw8LF2xULpL71tOPUfxs0w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "apps/frontend/node_modules/@lydell/node-pty-darwin-x64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty-darwin-x64/-/node-pty-darwin-x64-1.1.0.tgz", + "integrity": "sha512-XZdvqj5FjAMjH8bdp0YfaZjur5DrCIDD1VYiE9EkkYVMDQqRUPHYV3U8BVEQVT9hYfjmpr7dNaELF2KyISWSNA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "apps/frontend/node_modules/@lydell/node-pty-linux-arm64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty-linux-arm64/-/node-pty-linux-arm64-1.1.0.tgz", + "integrity": "sha512-yyDBmalCfHpLiQMT2zyLcqL2Fay4Xy7rIs8GH4dqKLnEviMvPGOK7LADVkKAsbsyXBSISL3Lt1m1MtxhPH6ckg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "apps/frontend/node_modules/@lydell/node-pty-linux-x64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty-linux-x64/-/node-pty-linux-x64-1.1.0.tgz", + "integrity": "sha512-NcNqRTD14QT+vXcEuqSSvmWY+0+WUBn2uRE8EN0zKtDpIEr9d+YiFj16Uqds6QfcLCHfZmC+Ls7YzwTaqDnanA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "apps/frontend/node_modules/@lydell/node-pty-win32-arm64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty-win32-arm64/-/node-pty-win32-arm64-1.1.0.tgz", + "integrity": "sha512-JOMbCou+0fA7d/m97faIIfIU0jOv8sn2OR7tI45u3AmldKoKoLP8zHY6SAvDDnI3fccO1R2HeR1doVjpS7HM0w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "apps/frontend/node_modules/@lydell/node-pty-win32-x64": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lydell/node-pty-win32-x64/-/node-pty-win32-x64-1.1.0.tgz", + "integrity": "sha512-3N56BZ+WDFnUMYRtsrr7Ky2mhWGl9xXcyqR6cexfuCqcz9RNWL+KoXRv/nZylY5dYaXkft4JaR1uVu+roiZDAw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@acemir/cssom": { + "version": "0.9.30", + "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.30.tgz", + "integrity": "sha512-9CnlMCI0LmCIq0olalQqdWrJHPzm0/tw3gzOA9zJSgvFX7Xau3D24mAGa4BtwxwY69nsuJW6kQqqCzf/mEcQgg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@adobe/css-tools": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz", + "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@anthropic-ai/sdk": { + "version": "0.71.2", + "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.71.2.tgz", + "integrity": "sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ==", + "license": "MIT", + "dependencies": { + "json-schema-to-ts": "^3.1.1" + }, + "bin": { + "anthropic-ai-sdk": "bin/cli" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + }, + "peerDependenciesMeta": { + "zod": { + "optional": true + } + } + }, + "node_modules/@apm-js-collab/code-transformer": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/@apm-js-collab/code-transformer/-/code-transformer-0.8.2.tgz", + "integrity": "sha512-YRjJjNq5KFSjDUoqu5pFUWrrsvGOxl6c3bu+uMFc9HNNptZ2rNU/TI2nLw4jnhQNtka972Ee2m3uqbvDQtPeCA==", + "license": "Apache-2.0" + }, + "node_modules/@apm-js-collab/tracing-hooks": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@apm-js-collab/tracing-hooks/-/tracing-hooks-0.3.1.tgz", + "integrity": "sha512-Vu1CbmPURlN5fTboVuKMoJjbO5qcq9fA5YXpskx3dXe/zTBvjODFoerw+69rVBlRLrJpwPqSDqEuJDEKIrTldw==", + "license": "Apache-2.0", + "dependencies": { + "@apm-js-collab/code-transformer": "^0.8.0", + "debug": "^4.4.1", + "module-details-from-path": "^1.0.4" + } + }, + "node_modules/@asamuzakjp/css-color": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.1.tgz", + "integrity": "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-color-parser": "^3.1.0", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "lru-cache": "^11.2.4" + } + }, + "node_modules/@asamuzakjp/css-color/node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@asamuzakjp/dom-selector": { + "version": "6.7.6", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.6.tgz", + "integrity": "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/nwsapi": "^2.3.9", + "bidi-js": "^1.0.3", + "css-tree": "^3.1.0", + "is-potential-custom-element-name": "^1.0.1", + "lru-cache": "^11.2.4" + } + }, + "node_modules/@asamuzakjp/dom-selector/node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@asamuzakjp/nwsapi": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", + "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz", + "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", + "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": 
"https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.27.1.tgz", + "integrity": "sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@csstools/color-helpers": { + 
"version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "peer": true, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-syntax-patches-for-csstree": { + "version": "1.0.22", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.22.tgz", + "integrity": "sha512-qBcx6zYlhleiFfdtzkRgwNC7VVoAwfK76Vmsw5t+PbvtdknO9StgRk7ROvq9so1iqbdW4uLIDAsXRsTfUrIoOw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + 
], + "license": "MIT", + "peer": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@develar/schema-utils": { + "version": "2.6.5", + "resolved": "https://registry.npmjs.org/@develar/schema-utils/-/schema-utils-2.6.5.tgz", + "integrity": "sha512-0cp4PsWQ/9avqTVMCtZ+GirikIA36ikvjtHweU4/j8yLtgObI0+JUPhYFScgwlteveGB1rt3Cm8UhN04XayDig==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.0", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/@dnd-kit/accessibility": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz", + "integrity": "sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/core": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@dnd-kit/core/-/core-6.3.1.tgz", + "integrity": "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "@dnd-kit/accessibility": "^3.1.1", + "@dnd-kit/utilities": "^3.2.2", + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/sortable": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@dnd-kit/sortable/-/sortable-10.0.0.tgz", + "integrity": "sha512-+xqhmIIzvAYMGfBYYnbKuNicfSsk4RksY2XdmJhT+HAC01nix6fHCztU68jooFiMUB01Ky3F0FyOvhG/BZrWkg==", + "license": "MIT", + "dependencies": { + "@dnd-kit/utilities": "^3.2.2", + "tslib": "^2.0.0" + }, + "peerDependencies": { + "@dnd-kit/core": "^6.3.0", + "react": ">=16.8.0" + } + }, + "node_modules/@dnd-kit/utilities": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@dnd-kit/utilities/-/utilities-3.2.2.tgz", + "integrity": "sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@electron-toolkit/preload": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@electron-toolkit/preload/-/preload-3.0.2.tgz", + "integrity": "sha512-TWWPToXd8qPRfSXwzf5KVhpXMfONaUuRAZJHsKthKgZR/+LqX1dZVSSClQ8OTAEduvLGdecljCsoT2jSshfoUg==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "electron": ">=13.0.0" + } + }, + "node_modules/@electron-toolkit/utils": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@electron-toolkit/utils/-/utils-4.0.0.tgz", + "integrity": "sha512-qXSntwEzluSzKl4z5yFNBknmPGjPa3zFhE4mp9+h0cgokY5ornAeP+CJQDBhKsL1S58aOQfcwkD3NwLZCl+64g==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "electron": ">=13.0.0" + } + }, + "node_modules/@electron/asar": { + "version": "3.2.18", + "resolved": "https://registry.npmjs.org/@electron/asar/-/asar-3.2.18.tgz", + "integrity": "sha512-2XyvMe3N3Nrs8cV39IKELRHTYUWFKrmqqSY1U+GMlc0jvqjIVnoxhNd2H4JolWQncbJi1DCvb5TNxZuI2fEjWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "commander": "^5.0.0", + "glob": "^7.1.6", + "minimatch": "^3.0.4" + }, + "bin": { + "asar": "bin/asar.js" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/@electron/asar/node_modules/minimatch": { + "version": 
"3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@electron/fuses": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@electron/fuses/-/fuses-1.8.0.tgz", + "integrity": "sha512-zx0EIq78WlY/lBb1uXlziZmDZI4ubcCXIMJ4uGjXzZW0nS19TjSPeXPAjzzTmKQlJUZm0SbmZhPKP7tuQ1SsEw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.1", + "fs-extra": "^9.0.1", + "minimist": "^1.2.5" + }, + "bin": { + "electron-fuses": "dist/bin.js" + } + }, + "node_modules/@electron/fuses/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@electron/fuses/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@electron/fuses/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@electron/get": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@electron/get/-/get-2.0.3.tgz", + "integrity": "sha512-Qkzpg2s9GnVV2I2BjRksUi43U5e6+zaQMcjoJy0C+C5oxaKl+fmckGDQFtRpZpZV0NQekuZZ+tGz7EA9TVnQtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.1.1", + "env-paths": "^2.2.0", + "fs-extra": "^8.1.0", + "got": "^11.8.5", + "progress": "^2.0.3", + "semver": "^6.2.0", + "sumchecker": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "global-agent": "^3.0.0" + } + }, + "node_modules/@electron/get/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@electron/node-gyp": { + "version": "10.2.0-electron.1", + "resolved": "git+ssh://git@github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2", + "integrity": "sha512-4MSBTT8y07YUDqf69/vSh80Hh791epYqGtWHO3zSKhYFwQg+gx9wi1PqbqP6YqC4WMsNxZ5l9oDmnWdK5pfCKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "glob": "^8.1.0", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.2.1", + "nopt": "^6.0.0", + "proc-log": "^2.0.1", + "semver": "^7.3.5", + "tar": "^6.2.1", + "which": "^2.0.2" + }, + "bin": { + "node-gyp": 
"bin/node-gyp.js" + }, + "engines": { + "node": ">=12.13.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/@npmcli/fs": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz", + "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "dev": true, + "license": "ISC" + }, + "node_modules/@electron/node-gyp/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/cacache": { + "version": "16.1.3", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz", + "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@electron/node-gyp/node_modules/glob": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@electron/node-gyp/node_modules/http-proxy-agent": { + 
"version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/@electron/node-gyp/node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/@electron/node-gyp/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/@electron/node-gyp/node_modules/make-fetch-happen": { + "version": "10.2.1", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", + "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@electron/node-gyp/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@electron/node-gyp/node_modules/minipass-collect": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", + "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@electron/node-gyp/node_modules/minipass-fetch": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz", + "integrity": 
"sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/@electron/node-gyp/node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@electron/node-gyp/node_modules/negotiator": { + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/@electron/node-gyp/node_modules/nopt": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz", + "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "^1.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@electron/node-gyp/node_modules/proc-log": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz", + "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + 
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@electron/node-gyp/node_modules/socks-proxy-agent": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/@electron/node-gyp/node_modules/ssri": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/unique-filename": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz", + "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^3.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/unique-slug": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz", + "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@electron/node-gyp/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/@electron/notarize": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz", + "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.1.1", + "fs-extra": "^9.0.1", + "promise-retry": "^2.0.1" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@electron/notarize/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": 
"https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@electron/notarize/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@electron/notarize/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@electron/osx-sign": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz", + "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "compare-version": "^0.1.2", + "debug": "^4.3.4", + "fs-extra": "^10.0.0", + "isbinaryfile": "^4.0.8", + "minimist": "^1.2.6", + "plist": "^3.0.5" + }, + "bin": { + "electron-osx-flat": "bin/electron-osx-flat.js", + "electron-osx-sign": "bin/electron-osx-sign.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/@electron/osx-sign/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@electron/osx-sign/node_modules/isbinaryfile": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz", + "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/gjtorikian/" + } + }, + "node_modules/@electron/osx-sign/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@electron/osx-sign/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@electron/rebuild": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/@electron/rebuild/-/rebuild-4.0.2.tgz", + "integrity": "sha512-8iZWVPvOpCdIc5Pj5udQV3PeO7liJVC7BBUSizl1HCfP7ZxYc9Kqz0c3PDNj2HQ5cQfJ5JaBeJIYKPjAvLn2Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@malept/cross-spawn-promise": "^2.0.0", + "debug": "^4.1.1", + "detect-libc": "^2.0.1", + "got": "^11.7.0", + "graceful-fs": "^4.2.11", + "node-abi": "^4.2.0", + "node-api-version": "^0.2.1", + "node-gyp": "^11.2.0", + "ora": "^5.1.0", + "read-binary-file-arch": "^1.0.6", + "semver": "^7.3.5", + "tar": "^6.0.5", + "yargs": "^17.0.1" + }, + "bin": { + "electron-rebuild": "lib/cli.js" + }, + "engines": { + "node": ">=22.12.0" + } + }, + "node_modules/@electron/universal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@electron/universal/-/universal-2.0.1.tgz", + "integrity": "sha512-fKpv9kg4SPmt+hY7SVBnIYULE9QJl8L3sCfcBsnqbJwwBwAeTLokJ9TRt9y7bK0JAzIW2y78TVVjvnQEms/yyA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@electron/asar": "^3.2.7", + "@malept/cross-spawn-promise": "^2.0.0", + "debug": "^4.3.1", + "dir-compare": "^4.2.0", + "fs-extra": "^11.1.1", + "minimatch": "^9.0.3", + "plist": "^3.1.0" + }, + "engines": { + "node": ">=16.4" + } + }, + "node_modules/@electron/universal/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@electron/universal/node_modules/fs-extra": { + "version": "11.3.3", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz", + "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/@electron/universal/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@electron/universal/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@electron/universal/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@electron/windows-sign": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@electron/windows-sign/-/windows-sign-1.2.2.tgz", + "integrity": 
"sha512-dfZeox66AvdPtb2lD8OsIIQh12Tp0GNCRUDfBHIKGpbmopZto2/A8nSpYYLoedPIHpqkeblZ/k8OV0Gy7PYuyQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "dependencies": { + "cross-dirname": "^0.1.0", + "debug": "^4.3.4", + "fs-extra": "^11.1.1", + "minimist": "^1.2.8", + "postject": "^1.0.0-alpha.6" + }, + "bin": { + "electron-windows-sign": "bin/electron-windows-sign.js" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/@electron/windows-sign/node_modules/fs-extra": { + "version": "11.3.3", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz", + "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/@electron/windows-sign/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@electron/windows-sign/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@epic-web/invariant": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@epic-web/invariant/-/invariant-1.0.0.tgz", + "integrity": "sha512-lrTPqgvfFQtR/eY/qkIzp98OGdNJu0m5ji3q/nJI8v3SXkRKEnWiOxMmbvcSoAIzv/cGiuvRy57k4suKQSAdwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": 
"sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + 
"engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + 
"integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": 
"https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + 
"version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@exodus/bytes": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.7.0.tgz", + "integrity": "sha512-5i+BtvujK/vM07YCGDyz4C4AyDzLmhxHMtM5HpUyPRtJPBdFPsj290ffXW+UXY21/G7GtXeHD2nRmq0T1ShyQQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + }, + "peerDependencies": { + "@exodus/crypto": "^1.0.0-rc.4" + }, + "peerDependenciesMeta": { + "@exodus/crypto": { + "optional": true + } + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz", + "integrity": "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", + "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.3", + "@floating-ui/utils": "^0.2.10" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.6.tgz", + "integrity": "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.4" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.10.tgz", + "integrity": "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + "license": "MIT" + }, + "node_modules/@gar/promisify": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", + "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": 
"sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + 
}, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@malept/cross-spawn-promise": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/@malept/cross-spawn-promise/-/cross-spawn-promise-2.0.0.tgz", + "integrity": "sha512-1DpKU0Z5ThltBwjNySMC14g0CkbyhCaz9FkhxqNsZI6uAPJXFS8cMXlBKo26FJ8ZuW6S9GCMcR9IO5k2X5/9Fg==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/malept" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/subscription/pkg/npm-.malept-cross-spawn-promise?utm_medium=referral&utm_source=npm_fund" + } + ], + "license": "Apache-2.0", + "dependencies": { + "cross-spawn": "^7.0.1" + }, + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/@malept/flatpak-bundler": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@malept/flatpak-bundler/-/flatpak-bundler-0.4.0.tgz", + "integrity": "sha512-9QOtNffcOF/c1seMCDnjckb3R9WHcG34tky+FHpNKKCW0wc/scYLwMtO+ptyGUfMW0/b/n4qRiALlaFHc9Oj7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.1.1", + "fs-extra": "^9.0.0", + "lodash": "^4.17.15", + "tmp-promise": "^3.0.2" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@malept/flatpak-bundler/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@malept/flatpak-bundler/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/@malept/flatpak-bundler/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@npmcli/agent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-3.0.0.tgz", + "integrity": "sha512-S79NdEgDQd/NGCay6TCoVzXSj74skRZIKJcpJjC5lOq34SZzyI6MqtiiWoiVWoVrTcGjNeC4ipbh1VIHlpfF5Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "agent-base": "^7.1.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.1", + "lru-cache": "^10.0.1", + "socks-proxy-agent": "^8.0.3" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/@npmcli/agent/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/@npmcli/fs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-4.0.0.tgz", + "integrity": "sha512-/xGlezI6xfGO9NwuJlnwz/K14qD1kCSAGtacBHnGzeAIuJGazcp45KP5NuyARXoKb7cwulAGWVsbeSxdG/cb0Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + 
"node_modules/@npmcli/move-file": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-2.0.1.tgz", + "integrity": "sha512-mJd2Z5TjYWq/ttPLLGqArdtnC74J6bOzg4rMDnN+p1xTacZ2yPRCk2y0oSWQtygLR9YVQXgOcONrwtnk3JupxQ==", + "deprecated": "This functionality has been moved to @npmcli/fs", + "dev": true, + "license": "MIT", + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@npmcli/move-file/node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/api-logs": { + "version": "0.208.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.208.0.tgz", + "integrity": "sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/context-async-hooks": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-2.2.0.tgz", + "integrity": "sha512-qRkLWiUEZNAmYapZ7KGS5C4OmBLcP/H2foXeOEaowYCR0wi89fHejrfYfbuLVCMLp/dWZXKvQusdbUEZjERfwQ==", + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.2.0.tgz", + "integrity": "sha512-FuabnnUm8LflnieVxs6eP7Z383hgQU4W1e3KJS6aOG3RxWxcHyBxH8fDMHNgu/gFx/M2jvTOW/4/PHhLz6bjWw==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/instrumentation": { + "version": "0.208.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.208.0.tgz", + "integrity": "sha512-Eju0L4qWcQS+oXxi6pgh7zvE2byogAkcsVv0OjHF/97iOz1N/aKE6etSGowYkie+YA1uo6DNwdSxaaNnLvcRlA==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@opentelemetry/api-logs": "0.208.0", + "import-in-the-middle": "^2.0.0", + "require-in-the-middle": "^8.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-amqplib": { + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-amqplib/-/instrumentation-amqplib-0.55.0.tgz", + "integrity": 
"sha512-5ULoU8p+tWcQw5PDYZn8rySptGSLZHNX/7srqo2TioPnAAcvTy6sQFQXsNPrAnyRRtYGMetXVyZUy5OaX1+IfA==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-connect": { + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-connect/-/instrumentation-connect-0.52.0.tgz", + "integrity": "sha512-GXPxfNB5szMbV3I9b7kNWSmQBoBzw7MT0ui6iU/p+NIzVx3a06Ri2cdQO7tG9EKb4aKSLmfX9Cw5cKxXqX6Ohg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@types/connect": "3.4.38" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-dataloader": { + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-dataloader/-/instrumentation-dataloader-0.26.0.tgz", + "integrity": "sha512-P2BgnFfTOarZ5OKPmYfbXfDFjQ4P9WkQ1Jji7yH5/WwB6Wm/knynAoA1rxbjWcDlYupFkyT0M1j6XLzDzy0aCA==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express": { + "version": "0.57.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-express/-/instrumentation-express-0.57.0.tgz", + "integrity": "sha512-HAdx/o58+8tSR5iW+ru4PHnEejyKrAy9fYFhlEI81o10nYxrGahnMAHWiSjhDC7UQSY3I4gjcPgSKQz4rm/asg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-fs": { + "version": "0.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-fs/-/instrumentation-fs-0.28.0.tgz", + "integrity": "sha512-FFvg8fq53RRXVBRHZViP+EMxMR03tqzEGpuq55lHNbVPyFklSVfQBN50syPhK5UYYwaStx0eyCtHtbRreusc5g==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-generic-pool": { + "version": "0.52.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-generic-pool/-/instrumentation-generic-pool-0.52.0.tgz", + "integrity": "sha512-ISkNcv5CM2IwvsMVL31Tl61/p2Zm2I2NAsYq5SSBgOsOndT0TjnptjufYVScCnD5ZLD1tpl4T3GEYULLYOdIdQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-graphql": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-graphql/-/instrumentation-graphql-0.56.0.tgz", + "integrity": 
"sha512-IPvNk8AFoVzTAM0Z399t34VDmGDgwT6rIqCUug8P9oAGerl2/PEIYMPOl/rerPGu+q8gSWdmbFSjgg7PDVRd3Q==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-hapi": { + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-hapi/-/instrumentation-hapi-0.55.0.tgz", + "integrity": "sha512-prqAkRf9e4eEpy4G3UcR32prKE8NLNlA90TdEU1UsghOTg0jUvs40Jz8LQWFEs5NbLbXHYGzB4CYVkCI8eWEVQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http": { + "version": "0.208.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.208.0.tgz", + "integrity": "sha512-rhmK46DRWEbQQB77RxmVXGyjs6783crXCnFjYQj+4tDH/Kpv9Rbg3h2kaNyp5Vz2emF1f9HOQQvZoHzwMWOFZQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/instrumentation": "0.208.0", + "@opentelemetry/semantic-conventions": "^1.29.0", + "forwarded-parse": "2.1.2" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-ioredis": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-ioredis/-/instrumentation-ioredis-0.56.0.tgz", + "integrity": "sha512-XSWeqsd3rKSsT3WBz/JKJDcZD4QYElZEa0xVdX8f9dh4h4QgXhKRLorVsVkK3uXFbC2sZKAS2Ds+YolGwD83Dg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/redis-common": "^0.38.2" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-kafkajs": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-kafkajs/-/instrumentation-kafkajs-0.18.0.tgz", + "integrity": "sha512-KCL/1HnZN5zkUMgPyOxfGjLjbXjpd4odDToy+7c+UsthIzVLFf99LnfIBE8YSSrYE4+uS7OwJMhvhg3tWjqMBg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.30.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-knex": { + "version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-knex/-/instrumentation-knex-0.53.0.tgz", + "integrity": "sha512-xngn5cH2mVXFmiT1XfQ1aHqq1m4xb5wvU6j9lSgLlihJ1bXzsO543cpDwjrZm2nMrlpddBf55w8+bfS4qDh60g==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.33.1" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-koa": { + "version": "0.57.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-koa/-/instrumentation-koa-0.57.0.tgz", + "integrity": 
"sha512-3JS8PU/D5E3q295mwloU2v7c7/m+DyCqdu62BIzWt+3u9utjxC9QS7v6WmUNuoDN3RM+Q+D1Gpj13ERo+m7CGg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.36.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.9.0" + } + }, + "node_modules/@opentelemetry/instrumentation-lru-memoizer": { + "version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-lru-memoizer/-/instrumentation-lru-memoizer-0.53.0.tgz", + "integrity": "sha512-LDwWz5cPkWWr0HBIuZUjslyvijljTwmwiItpMTHujaULZCxcYE9eU44Qf/pbVC8TulT0IhZi+RoGvHKXvNhysw==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mongodb": { + "version": "0.61.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mongodb/-/instrumentation-mongodb-0.61.0.tgz", + "integrity": "sha512-OV3i2DSoY5M/pmLk+68xr5RvkHU8DRB3DKMzYJdwDdcxeLs62tLbkmRyqJZsYf3Ht7j11rq35pHOWLuLzXL7pQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mongoose": { + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mongoose/-/instrumentation-mongoose-0.55.0.tgz", + "integrity": "sha512-5afj0HfF6aM6Nlqgu6/PPHFk8QBfIe3+zF9FGpX76jWPS0/dujoEYn82/XcLSaW5LPUDW8sni+YeK0vTBNri+w==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mysql": { + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mysql/-/instrumentation-mysql-0.54.0.tgz", + "integrity": "sha512-bqC1YhnwAeWmRzy1/Xf9cDqxNG2d/JDkaxnqF5N6iJKN1eVWI+vg7NfDkf52/Nggp3tl1jcC++ptC61BD6738A==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@types/mysql": "2.15.27" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mysql2": { + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mysql2/-/instrumentation-mysql2-0.55.0.tgz", + "integrity": "sha512-0cs8whQG55aIi20gnK8B7cco6OK6N+enNhW0p5284MvqJ5EPi+I1YlWsWXgzv/V2HFirEejkvKiI4Iw21OqDWg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.33.0", + "@opentelemetry/sql-common": "^0.41.2" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-pg": { + "version": "0.61.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-pg/-/instrumentation-pg-0.61.0.tgz", + "integrity": "sha512-UeV7KeTnRSM7ECHa3YscoklhUtTQPs6V6qYpG283AB7xpnPGCUCUfECFT9jFg6/iZOQTt3FHkB1wGTJCNZEvPw==", + "license": 
"Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.34.0", + "@opentelemetry/sql-common": "^0.41.2", + "@types/pg": "8.15.6", + "@types/pg-pool": "2.0.6" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-redis": { + "version": "0.57.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-redis/-/instrumentation-redis-0.57.0.tgz", + "integrity": "sha512-bCxTHQFXzrU3eU1LZnOZQ3s5LURxQPDlU3/upBzlWY77qOI1GZuGofazj3jtzjctMJeBEJhNwIFEgRPBX1kp/Q==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/redis-common": "^0.38.2", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-tedious": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-tedious/-/instrumentation-tedious-0.27.0.tgz", + "integrity": "sha512-jRtyUJNZppPBjPae4ZjIQ2eqJbcRaRfJkr0lQLHFmOU/no5A6e9s1OHLd5XZyZoBJ/ymngZitanyRRA5cniseA==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": "^0.208.0", + "@types/tedious": "^4.0.14" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-undici": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-undici/-/instrumentation-undici-0.19.0.tgz", + "integrity": "sha512-Pst/RhR61A2OoZQZkn6OLpdVpXp6qn3Y92wXa6umfJe9rV640r4bc6SWvw4pPN6DiQqPu2c8gnSSZPDtC6JlpQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/semantic-conventions": "^1.24.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.7.0" + } + }, + "node_modules/@opentelemetry/redis-common": { + "version": "0.38.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/redis-common/-/redis-common-0.38.2.tgz", + "integrity": "sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA==", + "license": "Apache-2.0", + "engines": { + "node": "^18.19.0 || >=20.6.0" + } + }, + "node_modules/@opentelemetry/resources": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.2.0.tgz", + "integrity": "sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.2.0.tgz", + "integrity": "sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==", + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/resources": "2.2.0", + 
"@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/semantic-conventions": { + "version": "1.38.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.38.0.tgz", + "integrity": "sha512-kocjix+/sSggfJhwXqClZ3i9Y/MI0fp7b+g7kCRm6psy2dsf8uApTRclwG18h8Avm7C9+fnt+O36PspJ/OzoWg==", + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sql-common": { + "version": "0.41.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/sql-common/-/sql-common-0.41.2.tgz", + "integrity": "sha512-4mhWm3Z8z+i508zQJ7r6Xi7y4mmoJpdvH0fZPFRkWrdp5fq7hhZ2HhYokEOLkfqSMgPR4Z9EyB3DBkbKGOqZiQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "^2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@playwright/test": { + "version": "1.57.0", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.57.0.tgz", + "integrity": "sha512-6TyEnHgd6SArQO8UO2OMTxshln3QMWBtPGrOCgs3wVEmQmwyuNtB10IZMfmYDE0riwNR1cu4q+pPcxMVtaG3TA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "playwright": "1.57.0" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@prisma/instrumentation": { + "version": "6.19.0", + "resolved": "https://registry.npmjs.org/@prisma/instrumentation/-/instrumentation-6.19.0.tgz", + "integrity": "sha512-QcuYy25pkXM8BJ37wVFBO7Zh34nyRV1GOb2n3lPkkbRYfl4hWl3PTcImP41P0KrzVXfa/45p6eVCos27x3exIg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/instrumentation": ">=0.52.0 <1" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.8" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.1.tgz", + "integrity": "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + "license": "MIT" + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz", + "integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-alert-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-alert-dialog/-/react-alert-dialog-1.1.15.tgz", + "integrity": "sha512-oTVLkEw5GpdRe29BqJ0LSDFWI3qu0vR1M0mUkOQWDIUnY/QIkLpgDMWuKxP94c2NAC2LGcgVhG1ImF3jkZ5wXw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dialog": "1.1.15", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + 
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-alert-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-checkbox": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", + "integrity": "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collapsible": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.12.tgz", + "integrity": "sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.1.7", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.15.tgz", + "integrity": "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz", + "integrity": "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz", + "integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.16.tgz", + "integrity": "sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz", + "integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.16.tgz", + "integrity": "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.15.tgz", + "integrity": "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + 
"@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz", + "integrity": "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.5.tgz", + "integrity": "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + "license": "MIT", + 
"dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.8.tgz", + "integrity": "sha512-+gISHcSPUJ7ktBy9RnTqbdKW78bcGke3t6taawyZ71pio1JewwGSJizycs7rLhGTvMJYCQB1DBK4KQsxs7U8dA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-context": "1.1.3", + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-context": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.3.tgz", + "integrity": "sha512-ieIFACdMpYfMEjF0rEf5KLvfVyIkOz6PDGyNnP+u+4xQ6jny3VCgA4OgXOwNx2aUkxn8zx9fiVcM8CfFYv9Lxw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-progress/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 
|| ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-radio-group": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz", + "integrity": "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.11.tgz", + "integrity": "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.10.tgz", + "integrity": "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "2.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz", + "integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", 
+ "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.8.tgz", + "integrity": "sha512-sDvqVY4itsKwwSMEe0jtKgfTh+72Sy3gPmQpjqcQneqQ4PFmr/1I0YA+2/puilhggCe2gJcx5EBAYFkWkdpa5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-separator/node_modules/@radix-ui/react-primitive": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.4.tgz", + "integrity": "sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.4" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + 
"@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-switch": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.2.6.tgz", + "integrity": "sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.13.tgz", + "integrity": "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-toast": { + "version": "1.2.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.15.tgz", + "integrity": "sha512-3OSz3TacUWy4WtOXV38DggwxoqJK4+eDkNMl5Z/MJZaoUPaP4/9lf81xXMe1I2ReTAptverZUpbPY4wWwWyL5g==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.8.tgz", + "integrity": 
"sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-visually-hidden": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": 
"sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.1.tgz", + "integrity": "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.1.tgz", + "integrity": "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.1.tgz", + "integrity": "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.1.tgz", + "integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + "license": "MIT" + }, + "node_modules/@rolldown/pluginutils": { + "version": 
"1.0.0-beta.53", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.53.tgz", + "integrity": "sha512-vENRlFU4YbrwVqNDZ7fLvy+JR1CRkyr01jhSiDpE1u6py3OMzQfztQU2jxykW3ALNxO4kSlqIDeYyD0Y9RcQeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz", + "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz", + "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz", + "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz", + "integrity": "sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz", + "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz", + "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz", + "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz", + "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz", + "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz", + "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz", + "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz", + "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz", + "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz", + "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz", + "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz", + "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz", + "integrity": 
"sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz", + "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz", + "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz", + "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz", + "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz", + "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@sentry-internal/browser-utils": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry-internal/browser-utils/-/browser-utils-10.29.0.tgz", + "integrity": "sha512-M3kycMY6f3KY9a8jDYac+yG0E3ZgWVWSxlOEC5MhYyX+g7mqxkwrb3LFQyuxSm/m+CCgMTCaPOOaB2twXP6EQg==", + "license": "MIT", + "dependencies": { + "@sentry/core": "10.29.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry-internal/feedback": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry-internal/feedback/-/feedback-10.29.0.tgz", + "integrity": "sha512-Y7IRsNeS99cEONu1mZWZc3HvbjNnu59Hgymm0swFFKbdgbCgdT6l85kn2oLsuq4Ew8Dw/pL/Sgpwsl9UgYFpUg==", + "license": "MIT", + "dependencies": { + "@sentry/core": "10.29.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry-internal/replay": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry-internal/replay/-/replay-10.29.0.tgz", + "integrity": "sha512-45NVw9PwB9TQ8z+xJ6G6Za+wmQ1RTA35heBSzR6U4bknj8LmA04k2iwnobvxCBEQXeLfcJEO1vFgagMoqMZMBw==", + "license": "MIT", + "dependencies": { + "@sentry-internal/browser-utils": "10.29.0", + "@sentry/core": "10.29.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry-internal/replay-canvas": { + "version": 
"10.29.0", + "resolved": "https://registry.npmjs.org/@sentry-internal/replay-canvas/-/replay-canvas-10.29.0.tgz", + "integrity": "sha512-typY4JrpAQQGPuSyd/BD8+nNCbvTV2UVvKzr+iKgI0m1qc4Dz8tHZ4Nfais2Z8eYn/pL1kqVQN5ERTmJoYFdIw==", + "license": "MIT", + "dependencies": { + "@sentry-internal/replay": "10.29.0", + "@sentry/core": "10.29.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry/browser": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry/browser/-/browser-10.29.0.tgz", + "integrity": "sha512-XdbyIR6F4qoR9Z1JCWTgunVcTJjS9p2Th+v4wYs4ME+ZdLC4tuKKmRgYg3YdSIWCn1CBfIgdI6wqETSf7H6Njw==", + "license": "MIT", + "dependencies": { + "@sentry-internal/browser-utils": "10.29.0", + "@sentry-internal/feedback": "10.29.0", + "@sentry-internal/replay": "10.29.0", + "@sentry-internal/replay-canvas": "10.29.0", + "@sentry/core": "10.29.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry/core": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry/core/-/core-10.29.0.tgz", + "integrity": "sha512-olQ2DU9dA/Bwsz3PtA9KNXRMqBWRQSkPw+MxwWEoU1K1qtiM9L0j6lbEFb5iSY3d7WYD5MB+1d5COugjSBrHtw==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry/electron": { + "version": "7.5.0", + "resolved": "https://registry.npmjs.org/@sentry/electron/-/electron-7.5.0.tgz", + "integrity": "sha512-88t/YsB5iO75faKdd7lIuJkwp9FGKgFlkDuaSJhsJiVcjlywkn8CwUbctAbS0gu6Suc0raHCF4ULvGyksKAoww==", + "license": "MIT", + "dependencies": { + "@sentry/browser": "10.29.0", + "@sentry/core": "10.29.0", + "@sentry/node": "10.29.0" + }, + "peerDependencies": { + "@sentry/node-native": "10.29.0" + }, + "peerDependenciesMeta": { + "@sentry/node-native": { + "optional": true + } + } + }, + "node_modules/@sentry/node": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry/node/-/node-10.29.0.tgz", + "integrity": "sha512-9j8VzV06VCj+H8tlxpfa7BNN4HzH5exv68WOufdMTXzzWLOXnzrdNDoYplm1G2S3LMvWsc1SVI3a8A0yBY7oWg==", + "license": "MIT", + "dependencies": { + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/context-async-hooks": "^2.2.0", + "@opentelemetry/core": "^2.2.0", + "@opentelemetry/instrumentation": "^0.208.0", + "@opentelemetry/instrumentation-amqplib": "0.55.0", + "@opentelemetry/instrumentation-connect": "0.52.0", + "@opentelemetry/instrumentation-dataloader": "0.26.0", + "@opentelemetry/instrumentation-express": "0.57.0", + "@opentelemetry/instrumentation-fs": "0.28.0", + "@opentelemetry/instrumentation-generic-pool": "0.52.0", + "@opentelemetry/instrumentation-graphql": "0.56.0", + "@opentelemetry/instrumentation-hapi": "0.55.0", + "@opentelemetry/instrumentation-http": "0.208.0", + "@opentelemetry/instrumentation-ioredis": "0.56.0", + "@opentelemetry/instrumentation-kafkajs": "0.18.0", + "@opentelemetry/instrumentation-knex": "0.53.0", + "@opentelemetry/instrumentation-koa": "0.57.0", + "@opentelemetry/instrumentation-lru-memoizer": "0.53.0", + "@opentelemetry/instrumentation-mongodb": "0.61.0", + "@opentelemetry/instrumentation-mongoose": "0.55.0", + "@opentelemetry/instrumentation-mysql": "0.54.0", + "@opentelemetry/instrumentation-mysql2": "0.55.0", + "@opentelemetry/instrumentation-pg": "0.61.0", + "@opentelemetry/instrumentation-redis": "0.57.0", + "@opentelemetry/instrumentation-tedious": "0.27.0", + "@opentelemetry/instrumentation-undici": "0.19.0", + "@opentelemetry/resources": "^2.2.0", + "@opentelemetry/sdk-trace-base": "^2.2.0", + "@opentelemetry/semantic-conventions": "^1.37.0", + 
"@prisma/instrumentation": "6.19.0", + "@sentry/core": "10.29.0", + "@sentry/node-core": "10.29.0", + "@sentry/opentelemetry": "10.29.0", + "import-in-the-middle": "^2", + "minimatch": "^9.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@sentry/node-core": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry/node-core/-/node-core-10.29.0.tgz", + "integrity": "sha512-f/Y0okHhPPb5HnYNBqCivJ2YuXtSadvcIx16dzU5mHQxZhgGednUCPEX7rsvPcd4HneQz12HKLqxbAmNu+b3FA==", + "license": "MIT", + "dependencies": { + "@apm-js-collab/tracing-hooks": "^0.3.1", + "@sentry/core": "10.29.0", + "@sentry/opentelemetry": "10.29.0", + "import-in-the-middle": "^2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/context-async-hooks": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/core": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/instrumentation": ">=0.57.1 <1", + "@opentelemetry/resources": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/sdk-trace-base": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/semantic-conventions": "^1.37.0" + } + }, + "node_modules/@sentry/node/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@sentry/node/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@sentry/opentelemetry": { + "version": "10.29.0", + "resolved": "https://registry.npmjs.org/@sentry/opentelemetry/-/opentelemetry-10.29.0.tgz", + "integrity": "sha512-5QvtAwS73HlI/+OTF1poAFELzsc0se+PHmMsXGGrOeNBvjCr3ZE8qvke09aeMn7uRImf3Nc9J6i2KtSHJnbKPA==", + "license": "MIT", + "dependencies": { + "@sentry/core": "10.29.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/context-async-hooks": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/core": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/sdk-trace-base": "^1.30.1 || ^2.1.0 || ^2.2.0", + "@opentelemetry/semantic-conventions": "^1.37.0" + } + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@szmarczak/http-timer": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.6.tgz", + "integrity": 
"sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==", + "dev": true, + "license": "MIT", + "dependencies": { + "defer-to-connect": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.18.tgz", + "integrity": "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "enhanced-resolve": "^5.18.3", + "jiti": "^2.6.1", + "lightningcss": "1.30.2", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.18.tgz", + "integrity": "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-x64": "4.1.18", + "@tailwindcss/oxide-freebsd-x64": "4.1.18", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.18", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-x64-musl": "4.1.18", + "@tailwindcss/oxide-wasm32-wasi": "4.1.18", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.18", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.18" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.18.tgz", + "integrity": "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.18.tgz", + "integrity": "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.18.tgz", + "integrity": "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.18.tgz", + "integrity": "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": 
"4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.18.tgz", + "integrity": "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.18.tgz", + "integrity": "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.18.tgz", + "integrity": "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.18.tgz", + "integrity": "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.18.tgz", + "integrity": "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.18.tgz", + "integrity": "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.0", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", + "integrity": "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.1.18", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.18.tgz", + "integrity": "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@tailwindcss/postcss": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.18.tgz", + "integrity": "sha512-Ce0GFnzAOuPyfV5SxjXGn0CubwGcuDB0zcdaPuCSzAa/2vII24JTkH+I6jcbXLb1ctjZMZZI6OjDaLPJQL1S0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "@tailwindcss/node": "4.1.18", + "@tailwindcss/oxide": "4.1.18", + "postcss": "^8.4.41", + "tailwindcss": "4.1.18" + } + }, + "node_modules/@tailwindcss/typography": { + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.19.tgz", + "integrity": "sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "6.0.10" + }, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1" + } + }, + "node_modules/@tanstack/react-virtual": { + "version": "3.13.14", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.14.tgz", + "integrity": "sha512-WG0d7mBD54eA7dgA3+sO5csS0B49QKqM6Gy5Rf31+Oq/LTKROQSao9m2N/vz1IqVragOKU5t5k1LAcqh/DfTxw==", + "license": "MIT", + "dependencies": { + "@tanstack/virtual-core": "3.13.14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@tanstack/virtual-core": { + "version": "3.13.14", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.14.tgz", + "integrity": "sha512-b5Uvd8J2dc7ICeX9SRb/wkCxWk7pUwN214eEPAQsqrsktSKTCmyLxOQWSMgogBByXclZeAdgZ3k4o0fIYUIBqQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@testing-library/jest-dom": { + "version": "6.9.1", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz", + "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "picocolors": "^1.1.1", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + 
"node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@testing-library/react": { + "version": "16.3.1", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.1.tgz", + "integrity": "sha512-gr4KtAWqIOQoucWYD/f6ki+j5chXfcPc74Col/6poTyqTmn7zRmodWahWRCp8tYd+GMqBonw6hstNzqjbs6gjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/cacheable-request": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.3.tgz", + "integrity": 
"sha512-IQ3EbTzGxIigb1I3qPZc1rWJnH0BmSKv5QYTalEwweFvyBDLSAe24zP0le/hyi7ecGfZVlIVAg4BZqb8WBwKqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/http-cache-semantics": "*", + "@types/keyv": "^3.1.4", + "@types/node": "*", + "@types/responselike": "^1.0.0" + } + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/fs-extra": { + "version": "9.0.13", + "resolved": "https://registry.npmjs.org/@types/fs-extra/-/fs-extra-9.0.13.tgz", + "integrity": "sha512-nEnwB++1u5lVDM2UI4c1+5R+FYaKfaAzS4OococimjVm3nQw3TuzH5UNsocrcTBbhnerblyHj4A49qXbIiZdpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/keyv": { + "version": "3.1.4", + "resolved": 
"https://registry.npmjs.org/@types/keyv/-/keyv-3.1.4.tgz", + "integrity": "sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/minimatch": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz", + "integrity": "sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/mysql": { + "version": "2.15.27", + "resolved": "https://registry.npmjs.org/@types/mysql/-/mysql-2.15.27.tgz", + "integrity": "sha512-YfWiV16IY0OeBfBCk8+hXKmdTKrKlwKN1MNKAPBu5JYxLwBEZl7QzeEpGnlZb3VMGJrrGmB84gXiH+ofs/TezA==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "25.0.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.3.tgz", + "integrity": "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/pg": { + "version": "8.15.6", + "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.15.6.tgz", + "integrity": "sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "pg-protocol": "*", + "pg-types": "^2.2.0" + } + }, + "node_modules/@types/pg-pool": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/pg-pool/-/pg-pool-2.0.6.tgz", + "integrity": "sha512-TaAUE5rq2VQYxab5Ts7WZhKNmuN78Q6PiFonTDdpbx8a1H0M1vhy3rhiMjl+e2iHmogyMw7jZF4FrE6eJUy5HQ==", + "license": "MIT", + "dependencies": { + "@types/pg": "*" + } + }, + "node_modules/@types/plist": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@types/plist/-/plist-3.0.5.tgz", + "integrity": "sha512-E6OCaRmAe4WDmWNsL/9RMqdkkzDCY1etutkflWk4c+AcjDU07Pcz1fQwTX0TQz+Pxqn9i4L1TU3UFpjnrcDgxA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@types/node": "*", + "xmlbuilder": ">=11.0.1" + } + }, + "node_modules/@types/react": { + "version": "19.2.7", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz", + "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==", + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "devOptional": true, + "license": "MIT", + "peer": true, + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + 
"node_modules/@types/responselike": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.3.tgz", + "integrity": "sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/tedious": { + "version": "4.0.14", + "resolved": "https://registry.npmjs.org/@types/tedious/-/tedious-4.0.14.tgz", + "integrity": "sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@types/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/verror": { + "version": "1.10.11", + "resolved": "https://registry.npmjs.org/@types/verror/-/verror-1.10.11.tgz", + "integrity": "sha512-RlDm9K7+o5stv0Co8i8ZRGxDbrTxhJtgjqjFyVh/tXQyl/rYtTKlnTvZ88oSTeYREWurwx20Js4kTuKCsFkUtg==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/@types/yauzl": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz", + "integrity": "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.51.0.tgz", + "integrity": "sha512-XtssGWJvypyM2ytBnSnKtHYOGT+4ZwTnBVl36TA4nRO2f4PRNGz5/1OszHzcZCvcBMh+qb7I06uoCmLTRdR9og==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.51.0", + "@typescript-eslint/type-utils": "8.51.0", + "@typescript-eslint/utils": "8.51.0", + "@typescript-eslint/visitor-keys": "8.51.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.51.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": 
"8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.51.0.tgz", + "integrity": "sha512-3xP4XzzDNQOIqBMWogftkwxhg5oMKApqY0BAflmLZiFYHqyhSOxv/cd/zPQLTcCXr4AkaKb25joocY0BD1WC6A==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@typescript-eslint/scope-manager": "8.51.0", + "@typescript-eslint/types": "8.51.0", + "@typescript-eslint/typescript-estree": "8.51.0", + "@typescript-eslint/visitor-keys": "8.51.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.51.0.tgz", + "integrity": "sha512-Luv/GafO07Z7HpiI7qeEW5NW8HUtZI/fo/kE0YbtQEFpJRUuR0ajcWfCE5bnMvL7QQFrmT/odMe8QZww8X2nfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.51.0", + "@typescript-eslint/types": "^8.51.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.51.0.tgz", + "integrity": "sha512-JhhJDVwsSx4hiOEQPeajGhCWgBMBwVkxC/Pet53EpBVs7zHHtayKefw1jtPaNRXpI9RA2uocdmpdfE7T+NrizA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.51.0", + "@typescript-eslint/visitor-keys": "8.51.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.51.0.tgz", + "integrity": "sha512-Qi5bSy/vuHeWyir2C8u/uqGMIlIDu8fuiYWv48ZGlZ/k+PRPHtaAu7erpc7p5bzw2WNNSniuxoMSO4Ar6V9OXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.51.0.tgz", + "integrity": "sha512-0XVtYzxnobc9K0VU7wRWg1yiUrw4oQzexCG2V2IDxxCxhqBMSMbjB+6o91A+Uc0GWtgjCa3Y8bi7hwI0Tu4n5Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.51.0", + "@typescript-eslint/typescript-estree": "8.51.0", + "@typescript-eslint/utils": "8.51.0", + "debug": "^4.3.4", + "ts-api-utils": "^2.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.51.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-8.51.0.tgz", + "integrity": "sha512-TizAvWYFM6sSscmEakjY3sPqGwxZRSywSsPEiuZF6d5GmGD9Gvlsv0f6N8FvAAA0CD06l3rIcWNbsN1e5F/9Ag==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.51.0.tgz", + "integrity": "sha512-1qNjGqFRmlq0VW5iVlcyHBbCjPB7y6SxpBkrbhNWMy/65ZoncXCEPJxkRZL8McrseNH6lFhaxCIaX+vBuFnRng==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.51.0", + "@typescript-eslint/tsconfig-utils": "8.51.0", + "@typescript-eslint/types": "8.51.0", + "@typescript-eslint/visitor-keys": "8.51.0", + "debug": "^4.3.4", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.51.0.tgz", + "integrity": "sha512-11rZYxSe0zabiKaCP2QAwRf/dnmgFgvTmeDTtZvUvXG3UuAdg/GU02NExmmIXzz3vLGgMdtrIosI84jITQOxUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.51.0", + "@typescript-eslint/types": "8.51.0", + "@typescript-eslint/typescript-estree": "8.51.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.51.0.tgz", + "integrity": "sha512-mM/JRQOzhVN1ykejrvwnBRV3+7yTKK8tVANVN3o1O0t0v7o+jqdVu9crPy5Y9dov15TJk/FTIgoUGHrTOVL3Zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.51.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@vitejs/plugin-react": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.2.tgz", + "integrity": "sha512-EcA07pHJouywpzsoTUqNh5NwGayl2PPVEJKUSinGGSxFGYn+shYbqMGBg6FXDqgXum9Ou/ecb+411ssw8HImJQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.5", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.53", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.18.0" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@vitest/expect": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz", + "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.0.16", + "@vitest/utils": "4.0.16", + "chai": "^6.2.1", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz", + "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "4.0.16", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz", + "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz", + "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.0.16", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz", + "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.16", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": 
"https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz", + "integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz", + "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.16", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@xmldom/xmldom": { + "version": "0.8.11", + "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.11.tgz", + "integrity": "sha512-cQzWCtO6C8TQiYl1ruKNn2U6Ao4o4WBBcbL61yJl84x+j5sOWWFU9X7DpND8XZG3daDppSsigMdfAIl2upQBRw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@xterm/addon-fit": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.11.0.tgz", + "integrity": "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==", + "license": "MIT" + }, + "node_modules/@xterm/addon-serialize": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.14.0.tgz", + "integrity": "sha512-uteyTU1EkrQa2Ux6P/uFl2fzmXI46jy5uoQMKEOM0fKTyiW7cSn0WrFenHm5vO5uEXX/GpwW/FgILvv3r0WbkA==", + "license": "MIT" + }, + "node_modules/@xterm/addon-web-links": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.12.0.tgz", + "integrity": "sha512-4Smom3RPyVp7ZMYOYDoC/9eGJJJqYhnPLGGqJ6wOBfB8VxPViJNSKdgRYb8NpaM6YSelEKbA2SStD7lGyqaobw==", + "license": "MIT" + }, + "node_modules/@xterm/addon-webgl": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.19.0.tgz", + "integrity": "sha512-b3fMOsyLVuCeNJWxolACEUED0vm7qC0cy4wRvf3oURSzDTYVQiGPhTnhWZwIHdvC48Y+oLhvYXnY4XDXPoJo6A==", + "license": "MIT" + }, + "node_modules/@xterm/xterm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz", + "integrity": "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==", + "license": "MIT", + "workspaces": [ + "addons/*" + ] + }, + "node_modules/7zip-bin": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/7zip-bin/-/7zip-bin-5.2.0.tgz", + "integrity": "sha512-ukTPVhqG4jNzMro2qA9HSCSSVJN3aN7tlb+hfqYCt3ER0yWroeA2VR38MNrOHLQ/cVj+DaIMad0kFCtWWowh/A==", + "dev": true, + "license": "MIT" + }, + "node_modules/abbrev": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz", + "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + 
"engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/agentkeepalive": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", + "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/ansi-escapes": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.2.0.tgz", + "integrity": "sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "environment": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": 
"4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/app-builder-bin": { + "version": "5.0.0-alpha.12", + "resolved": "https://registry.npmjs.org/app-builder-bin/-/app-builder-bin-5.0.0-alpha.12.tgz", + "integrity": "sha512-j87o0j6LqPL3QRr8yid6c+Tt5gC7xNfYo6uQIQkorAC6MpeayVMZrEDzKmJJ/Hlv7EnOQpaRm53k6ktDYZyB6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/app-builder-lib": { + "version": "26.0.12", + "resolved": "https://registry.npmjs.org/app-builder-lib/-/app-builder-lib-26.0.12.tgz", + "integrity": "sha512-+/CEPH1fVKf6HowBUs6LcAIoRcjeqgvAeoSE+cl7Y7LndyQ9ViGPYibNk7wmhMHzNgHIuIbw4nWADPO+4mjgWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@develar/schema-utils": "~2.6.5", + "@electron/asar": "3.2.18", + "@electron/fuses": "^1.8.0", + "@electron/notarize": "2.5.0", + "@electron/osx-sign": "1.3.1", + "@electron/rebuild": "3.7.0", + "@electron/universal": "2.0.1", + "@malept/flatpak-bundler": "^0.4.0", + "@types/fs-extra": "9.0.13", + "async-exit-hook": "^2.0.1", + "builder-util": "26.0.11", + "builder-util-runtime": "9.3.1", + "chromium-pickle-js": "^0.2.0", + "config-file-ts": "0.2.8-rc1", + "debug": "^4.3.4", + "dotenv": "^16.4.5", + "dotenv-expand": "^11.0.6", + "ejs": "^3.1.8", + "electron-publish": "26.0.11", + "fs-extra": "^10.1.0", + "hosted-git-info": "^4.1.0", + "is-ci": "^3.0.0", + "isbinaryfile": "^5.0.0", + "js-yaml": "^4.1.0", + "json5": "^2.2.3", + "lazy-val": "^1.0.5", + "minimatch": "^10.0.0", + "plist": "3.1.0", + "resedit": "^1.7.0", + "semver": "^7.3.8", + "tar": "^6.1.12", + "temp-file": "^3.4.0", + "tiny-async-pool": "1.3.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "dmg-builder": "26.0.12", + "electron-builder-squirrel-windows": "26.0.12" + } + }, + "node_modules/app-builder-lib/node_modules/@electron/rebuild": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-3.7.0.tgz", + "integrity": "sha512-VW++CNSlZwMYP7MyXEbrKjpzEwhB5kDNbzGtiPEjwYysqyTCF+YbNJ210Dj3AjWsGSV4iEEwNkmJN9yGZmVvmw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@electron/node-gyp": "git+https://github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2", + "@malept/cross-spawn-promise": "^2.0.0", + "chalk": "^4.0.0", + "debug": "^4.1.1", + "detect-libc": "^2.0.1", + "fs-extra": "^10.0.0", + "got": "^11.7.0", + "node-abi": "^3.45.0", + "node-api-version": "^0.2.0", + "ora": "^5.1.0", + "read-binary-file-arch": "^1.0.6", + "semver": "^7.3.5", + "tar": "^6.0.5", + "yargs": "^17.0.1" + }, + "bin": { + "electron-rebuild": "lib/cli.js" + }, + "engines": { + "node": ">=12.13.0" + } + }, + "node_modules/app-builder-lib/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/app-builder-lib/node_modules/jsonfile": { + "version": "6.2.0", + 
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/app-builder-lib/node_modules/node-abi": { + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz", + "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/app-builder-lib/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/aria-hidden": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.6.tgz", + "integrity": "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", + "integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "is-array-buffer": "^3.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.9", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", + "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.0", + "es-object-atoms": "^1.1.1", + "get-intrinsic": "^1.3.0", + "is-string": "^1.1.1", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "license": "MIT", + "dependencies": 
{ + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz", + "integrity": "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz", + "integrity": "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", + "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": 
"sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true, + "license": "MIT" + }, + "node_modules/async-exit-hook": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/async-exit-hook/-/async-exit-hook-2.0.1.tgz", + "integrity": "sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/auto-claude-ui": { + "resolved": "apps/frontend", + "link": true + }, + "node_modules/autoprefixer": { + "version": "10.4.23", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", + "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001760", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } 
+ }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.11", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", + "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/boolean": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz", + "integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==", + "deprecated": "Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/builder-util": { + "version": "26.0.11", + "resolved": "https://registry.npmjs.org/builder-util/-/builder-util-26.0.11.tgz", + "integrity": "sha512-xNjXfsldUEe153h1DraD0XvDOpqGR0L5eKFkdReB7eFW5HqysDZFfly4rckda6y9dF39N3pkPlOblcfHKGw+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/debug": "^4.1.6", + "7zip-bin": "~5.2.0", + "app-builder-bin": "5.0.0-alpha.12", + "builder-util-runtime": "9.3.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.6", + "debug": "^4.3.4", + "fs-extra": "^10.1.0", + "http-proxy-agent": "^7.0.0", + 
"https-proxy-agent": "^7.0.0", + "is-ci": "^3.0.0", + "js-yaml": "^4.1.0", + "sanitize-filename": "^1.6.3", + "source-map-support": "^0.5.19", + "stat-mode": "^1.0.0", + "temp-file": "^3.4.0", + "tiny-async-pool": "1.3.0" + } + }, + "node_modules/builder-util-runtime": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/builder-util-runtime/-/builder-util-runtime-9.3.1.tgz", + "integrity": "sha512-2/egrNDDnRaxVwK3A+cJq6UOlqOdedGA7JPqCeJjN2Zjk1/QB/6QUi3b714ScIGS7HafFXTyzJEOr5b44I3kvQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.3.4", + "sax": "^1.2.4" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/builder-util/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/builder-util/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/builder-util/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cacache": { + "version": "19.0.1", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-19.0.1.tgz", + "integrity": "sha512-hdsUxulXCi5STId78vRVYEtDAjq99ICAUktLTeTYsLoTE6Z8dS0c8pWNCxwdrk9YfJeobDZc2Y186hD/5ZQgFQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^4.0.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + "lru-cache": "^10.0.1", + "minipass": "^7.0.3", + "minipass-collect": "^2.0.1", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^7.0.2", + "ssri": "^12.0.0", + "tar": "^7.4.3", + "unique-filename": "^4.0.0" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/cacache/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/cacache/node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + 
"node_modules/cacache/node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/cacache/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/tar": { + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", + "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.1.0", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/cacache/node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/cacheable-lookup": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", + "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.6.0" + } + }, + "node_modules/cacheable-request": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.4.tgz", + "integrity": "sha512-v+p6ongsrp0yTGbJXjgxPow2+DL93DASP4kXCDKb8/bwRtt9OEF3whggkkDkGNzgcWy2XaF4a8nZglC7uElscg==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone-response": "^1.0.2", + "get-stream": "^5.1.0", + "http-cache-semantics": "^4.0.0", + "keyv": "^4.0.0", + "lowercase-keys": "^2.0.0", + "normalize-url": "^6.0.1", + "responselike": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001762", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001762.tgz", + "integrity": "sha512-PxZwGNvH7Ak8WX5iXzoK1KPZttBXNPuaOvI2ZYU7NrlM+d9Ov+TUvlLOBNGzVXAntMSMMlJPd+jY6ovrVjSmUw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chokidar": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-5.0.0.tgz", + "integrity": "sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==", + "license": "MIT", + "dependencies": { + "readdirp": "^5.0.0" + }, + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/chromium-pickle-js": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/chromium-pickle-js/-/chromium-pickle-js-0.2.0.tgz", + "integrity": "sha512-1R5Fho+jBq0DDydt+/vHWj5KJNJCKdARKOCwZUen84I5BreWoLqRLANH1U87eJy1tiASPtMnGqJJq0ZsLoRPOw==", + "dev": true, + "license": "MIT" + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "license": "MIT" + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=6" + } + }, + "node_modules/cli-cursor": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", + "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-truncate": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", + "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "slice-ansi": "^3.0.0", + "string-width": "^4.2.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/clone-response": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz", + "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-response": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": 
"sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/compare-version": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/compare-version/-/compare-version-0.1.2.tgz", + "integrity": "sha512-pJDh5/4wrEnXX/VWRZvruAGHkzKdr46z11OlTPN+VrATlWWhSKewNCJ1futCO5C7eJB3nPMFZA1LeYtcFboZ2A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/config-file-ts": { + "version": "0.2.8-rc1", + "resolved": "https://registry.npmjs.org/config-file-ts/-/config-file-ts-0.2.8-rc1.tgz", + "integrity": "sha512-GtNECbVI82bT4RiDIzBSVuTKoSHufnU7Ce7/42bkWZJZFLjmDF2WBpVsvRkhKCfKBnTBb3qZrBwPpFBU/Myvhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "glob": "^10.3.12", + "typescript": "^5.4.3" + } + }, + "node_modules/config-file-ts/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"balanced-match": "^1.0.0" + } + }, + "node_modules/config-file-ts/node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/config-file-ts/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/crc": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/crc/-/crc-3.8.0.tgz", + "integrity": "sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "buffer": "^5.1.0" + } + }, + "node_modules/cross-dirname": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/cross-dirname/-/cross-dirname-0.1.0.tgz", + "integrity": "sha512-+R08/oI0nl3vfPcqftZRpytksBXDzOUveBq/NBVx0sUp1axwzPQrKinNx5yd5sxPu8j1wIy8AfnVQ+5eFdha6Q==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/cross-env": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-10.1.0.tgz", + "integrity": "sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@epic-web/invariant": "^1.0.0", + "cross-spawn": "^7.0.6" + }, + "bin": { + "cross-env": "dist/bin/cross-env.js", + "cross-env-shell": "dist/bin/cross-env-shell.js" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/css-tree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", + "integrity": 
"sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.12.2", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssstyle": { + "version": "5.3.6", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.6.tgz", + "integrity": "sha512-legscpSpgSAeGEe0TNcai97DKt9Vd9AsAdOL7Uoetb52Ar/8eJm3LIa39qpv8wWzLFlNG4vVvppQM+teaMPj3A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^4.1.1", + "@csstools/css-syntax-patches-for-csstree": "^1.0.21", + "css-tree": "^3.1.0", + "lru-cache": "^11.2.4" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/cssstyle/node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/data-urls": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz", + "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.0.0" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/data-view-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", + "integrity": "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", + "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/inspect-js" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", + "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + 
"node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + "license": "MIT" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-compare": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/dir-compare/-/dir-compare-4.2.0.tgz", + "integrity": "sha512-2xMCmOoMrdQIPHdsTawECdNPwlVFB9zGcz3kuhmBO6U3oU+UQjsue0i8ayLKpgBcm+hcXPMVSGUN9d+pvJ6+VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimatch": "^3.0.5", + "p-limit": "^3.1.0 " + } + }, + "node_modules/dir-compare/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/dmg-builder": { + "version": "26.0.12", + "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz", + "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "app-builder-lib": "26.0.12", + "builder-util": "26.0.11", + "builder-util-runtime": "9.3.1", + "fs-extra": "^10.1.0", + "iconv-lite": "^0.6.2", + "js-yaml": "^4.1.0" + }, + "optionalDependencies": { + "dmg-license": "^1.0.11" + } + }, + "node_modules/dmg-builder/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dmg-builder/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/dmg-builder/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/dmg-license": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/dmg-license/-/dmg-license-1.0.11.tgz", + "integrity": "sha512-ZdzmqwKmECOWJpqefloC5OJy1+WZBBse5+MR88z9g9Zn4VY+WYUkAyojmhzJckH5YbbZGcYIuGAkY5/Ys5OM2Q==", + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "dependencies": { + "@types/plist": "^3.0.1", + "@types/verror": "^1.10.3", + "ajv": "^6.10.0", + "crc": "^3.8.0", + "iconv-corefoundation": "^1.1.7", + "plist": "^3.0.4", + "smart-buffer": "^4.0.2", + "verror": "^1.10.0" + }, + "bin": { + "dmg-license": "bin/dmg-license.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT" + }, + "node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": 
"sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dotenv-expand": { + "version": "11.0.7", + "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-11.0.7.tgz", + "integrity": "sha512-zIHwmZPRshsCdpMDyVsqGmgyP0yT8GAgXUnkdAoJisxvf33k7yO6OuoKmcTGuXPWSsm8Oh88nZicRLA9Y0rUeA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "dotenv": "^16.4.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/ejs": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.10.tgz", + "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "jake": "^10.8.5" + }, + "bin": { + "ejs": "bin/cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/electron": { + "version": "39.2.7", + "resolved": "https://registry.npmjs.org/electron/-/electron-39.2.7.tgz", + "integrity": "sha512-KU0uFS6LSTh4aOIC3miolcbizOFP7N1M46VTYVfqIgFiuA2ilfNaOHLDS9tCMvwwHRowAsvqBrh9NgMXcTOHCQ==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@electron/get": "^2.0.0", + "@types/node": "^22.7.7", + "extract-zip": "^2.0.1" + }, + "bin": { + "electron": "cli.js" + }, + "engines": { + "node": ">= 12.20.55" + } + }, + "node_modules/electron-builder": { + "version": "26.0.12", + "resolved": "https://registry.npmjs.org/electron-builder/-/electron-builder-26.0.12.tgz", + "integrity": "sha512-cD1kz5g2sgPTMFHjLxfMjUK5JABq3//J4jPswi93tOPFz6btzXYtK5NrDt717NRbukCUDOrrvmYVOWERlqoiXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "app-builder-lib": "26.0.12", + "builder-util": "26.0.11", + "builder-util-runtime": "9.3.1", + "chalk": "^4.1.2", + "dmg-builder": "26.0.12", + "fs-extra": "^10.1.0", + "is-ci": "^3.0.0", + "lazy-val": "^1.0.5", + "simple-update-notifier": "2.0.0", + "yargs": "^17.6.2" + }, + "bin": { + "electron-builder": "cli.js", + "install-app-deps": "install-app-deps.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/electron-builder-squirrel-windows": { + "version": "26.0.12", + "resolved": "https://registry.npmjs.org/electron-builder-squirrel-windows/-/electron-builder-squirrel-windows-26.0.12.tgz", + "integrity": "sha512-kpwXM7c/ayRUbYVErQbsZ0nQZX4aLHQrPEG9C4h9vuJCXylwFH8a7Jgi2VpKIObzCXO7LKHiCw4KdioFLFOgqA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "app-builder-lib": "26.0.12", + "builder-util": "26.0.11", + "electron-winstaller": "5.4.0" + } + }, + 
"node_modules/electron-builder/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/electron-builder/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/electron-builder/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/electron-log": { + "version": "5.4.3", + "resolved": "https://registry.npmjs.org/electron-log/-/electron-log-5.4.3.tgz", + "integrity": "sha512-sOUsM3LjZdugatazSQ/XTyNcw8dfvH1SYhXWiJyfYodAAKOZdHs0txPiLDXFzOZbhXgAgshQkshH2ccq0feyLQ==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/electron-publish": { + "version": "26.0.11", + "resolved": "https://registry.npmjs.org/electron-publish/-/electron-publish-26.0.11.tgz", + "integrity": "sha512-a8QRH0rAPIWH9WyyS5LbNvW9Ark6qe63/LqDB7vu2JXYpi0Gma5Q60Dh4tmTqhOBQt0xsrzD8qE7C+D7j+B24A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/fs-extra": "^9.0.11", + "builder-util": "26.0.11", + "builder-util-runtime": "9.3.1", + "chalk": "^4.1.2", + "form-data": "^4.0.0", + "fs-extra": "^10.1.0", + "lazy-val": "^1.0.5", + "mime": "^2.5.2" + } + }, + "node_modules/electron-publish/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/electron-publish/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/electron-publish/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.267", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", + "integrity": 
"sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", + "dev": true, + "license": "ISC" + }, + "node_modules/electron-updater": { + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/electron-updater/-/electron-updater-6.6.2.tgz", + "integrity": "sha512-Cr4GDOkbAUqRHP5/oeOmH/L2Bn6+FQPxVLZtPbcmKZC63a1F3uu5EefYOssgZXG3u/zBlubbJ5PJdITdMVggbw==", + "license": "MIT", + "dependencies": { + "builder-util-runtime": "9.3.1", + "fs-extra": "^10.1.0", + "js-yaml": "^4.1.0", + "lazy-val": "^1.0.5", + "lodash.escaperegexp": "^4.1.2", + "lodash.isequal": "^4.5.0", + "semver": "^7.6.3", + "tiny-typed-emitter": "^2.1.0" + } + }, + "node_modules/electron-updater/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/electron-updater/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/electron-updater/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/electron-vite": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/electron-vite/-/electron-vite-5.0.0.tgz", + "integrity": "sha512-OHp/vjdlubNlhNkPkL/+3JD34ii5ov7M0GpuXEVdQeqdQ3ulvVR7Dg/rNBLfS5XPIFwgoBLDf9sjjrL+CuDyRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.4", + "@babel/plugin-transform-arrow-functions": "^7.27.1", + "cac": "^6.7.14", + "esbuild": "^0.25.11", + "magic-string": "^0.30.19", + "picocolors": "^1.1.1" + }, + "bin": { + "electron-vite": "bin/electron-vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "@swc/core": "^1.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + } + } + }, + "node_modules/electron-winstaller": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/electron-winstaller/-/electron-winstaller-5.4.0.tgz", + "integrity": "sha512-bO3y10YikuUwUuDUQRM4KfwNkKhnpVO7IPdbsrejwN9/AABJzzTQ4GeHwyzNSrVO+tEH3/Np255a3sVZpZDjvg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@electron/asar": "^3.2.1", + "debug": "^4.1.1", + "fs-extra": "^7.0.1", + "lodash": "^4.17.21", + "temp": "^0.9.0" + }, + "engines": { + "node": ">=8.0.0" + }, + "optionalDependencies": { + "@electron/windows-sign": "^1.1.2" + } + }, + "node_modules/electron-winstaller/node_modules/fs-extra": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", + "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "graceful-fs": "^4.1.2", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + }, + "engines": { + "node": ">=6 <7 || >=8" + } + }, + "node_modules/electron/node_modules/@types/node": { + "version": "22.19.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", + "integrity": "sha512-1N9SBnWYOJTrNZCdh/yJE+t910Y128BoyY+zBLWhL3r0TYzlTmFdXrPwHL9DyFZmlEXNQQolTZh3KHV31QDhyA==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/electron/node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/encoding": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", + "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "iconv-lite": "^0.6.2" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", + "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.4", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.4.tgz", + "integrity": "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/environment": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz", + "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", + "integrity": 
"sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-abstract": { + "version": "1.24.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.1.tgz", + "integrity": "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": "^1.1.8", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.2", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.2.1", + "is-set": "^2.0.3", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.1", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.4", + "object-keys": "^1.1.1", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.4", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", + "string.prototype.trim": "^1.2.10", + "string.prototype.trimend": "^1.0.9", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.19" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.2.tgz", + "integrity": "sha512-BrUQ0cPTB/IwXj23HtwHjS9n7O4h9FX94b4xc5zlTHxeLgTAdzYUDyy6KdExAl9lbN5rtfe44xpjpmj9grxs5w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.1", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.1.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.3.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + 
"has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "iterator.prototype": "^1.1.5", + "safe-array-concat": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", + "integrity": "sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-to-primitive": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", + "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es6-error": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", + "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + 
"@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.37.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz", + "integrity": "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.3", + "array.prototype.tosorted": "^1.1.4", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.2.1", + "estraverse": "^5.3.0", + "hasown": "^2.0.2", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.9", + "object.fromentries": "^2.0.8", + "object.values": "^1.2.1", + 
"prop-types": "^15.8.1", + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.12", + "string.prototype.repeat": "^1.0.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz", + "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.24.4", + "@babel/parser": "^7.24.4", + "hermes-parser": "^0.25.1", + "zod": "^3.25.0 || ^4.0.0", + "zod-validation-error": "^3.5.0 || ^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + 
} + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eventemitter3": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz", + "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", + "dev": true, + "license": "MIT" + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/exponential-backoff": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/exponential-backoff/-/exponential-backoff-3.1.3.tgz", + "integrity": "sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/extract-zip": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", + "integrity": 
"sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "debug": "^4.1.1", + "get-stream": "^5.1.0", + "yauzl": "^2.10.0" + }, + "bin": { + "extract-zip": "cli.js" + }, + "engines": { + "node": ">= 10.17.0" + }, + "optionalDependencies": { + "@types/yauzl": "^2.9.1" + } + }, + "node_modules/extsprintf": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.4.1.tgz", + "integrity": "sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA==", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "optional": true + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/filelist": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz", + "integrity": "sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "minimatch": "^5.0.1" + } + }, + "node_modules/filelist/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/filelist/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": 
"https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/forwarded-parse": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/forwarded-parse/-/forwarded-parse-2.1.2.tgz", + "integrity": "sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw==", + "license": "MIT" + }, + 
"node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/framer-motion": { + "version": "12.23.26", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.23.26.tgz", + "integrity": "sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA==", + "license": "MIT", + "dependencies": { + "motion-dom": "^12.23.23", + "motion-utils": "^12.23.6", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@emotion/is-prop-valid": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/fs-extra": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", + "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + }, + "engines": { + "node": ">=6 <7 || >=8" + } + }, + "node_modules/fs-minipass": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", + "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz", + "integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-symbol-description": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", + "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/global-agent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-agent/-/global-agent-3.0.0.tgz", + "integrity": "sha512-PT6XReJ+D07JvGoxQMkT6qji/jVNfX/h364XHZOWeRzy64sSFr+xJ5OX7LI3b4MPQzdL4H8Y8M0xzPpsVMwA8Q==", + "dev": true, + "license": "BSD-3-Clause", + "optional": true, + "dependencies": { + "boolean": "^3.0.1", + "es6-error": "^4.1.1", + "matcher": "^3.0.0", + "roarr": "^2.15.3", + "semver": "^7.3.2", + "serialize-error": "^7.0.1" + }, + "engines": { + "node": ">=10.0" + } + }, + "node_modules/globals": { + "version": "17.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-17.0.0.tgz", + "integrity": "sha512-gv5BeD2EssA793rlFWVPMMCqefTlpusw6/2TbAVMy0FzcG8wKJn4O+NqJ4+XWmmwrayJgw5TzrmWjFgmz1XPqw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globalthis": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "11.8.6", + "resolved": "https://registry.npmjs.org/got/-/got-11.8.6.tgz", + "integrity": "sha512-6tfZ91bOr7bOXnK7PRDCGBLa1H4U080YHNaAQ2KsMGlLEzRbk44nsZF2E1IeRc3vtJHPVbKCYgdFbaGO2ljd8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^4.0.0", + "@szmarczak/http-timer": "^4.0.5", + "@types/cacheable-request": "^6.0.1", + "@types/responselike": "^1.0.0", + "cacheable-lookup": "^5.0.3", + "cacheable-request": "^7.0.2", + "decompress-response": "^6.0.0", + "http2-wrapper": "^1.0.0-beta.5.2", + "lowercase-keys": "^2.0.0", + "p-cancelable": "^2.0.0", + "responselike": "^2.0.0" + }, + "engines": { + "node": ">=10.19.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/has-bigints": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", + "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", + "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": 
"sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hermes-estree": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", + "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", + "dev": true, + "license": "MIT" + }, + "node_modules/hermes-parser": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", + "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "hermes-estree": "0.25.1" + } + }, + "node_modules/hosted-git-info": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", + "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/hosted-git-info/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/hosted-git-info/node_modules/yallist": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/html-encoding-sniffer": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz", + "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@exodus/bytes": "^1.6.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/html-parse-stringify": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-parse-stringify/-/html-parse-stringify-3.0.1.tgz", + "integrity": "sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==", + "license": "MIT", + "dependencies": { + "void-elements": "3.1.0" + } + }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http2-wrapper": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz", + "integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.0.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/husky": { + "version": "9.1.7", + "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz", + "integrity": "sha512-5gs5ytaNjBrh5Ow3zrvdUUY+0VxIuWVL4i9irt6friV+BqdCfmV11CQTWMiBYWHbXhco+J1kHfTOUkePhCDvMA==", + "dev": true, + "license": "MIT", + "bin": { + 
"husky": "bin.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/typicode" + } + }, + "node_modules/i18next": { + "version": "25.7.3", + "resolved": "https://registry.npmjs.org/i18next/-/i18next-25.7.3.tgz", + "integrity": "sha512-2XaT+HpYGuc2uTExq9TVRhLsso+Dxym6PWaKpn36wfBmTI779OQ7iP/XaZHzrnGyzU4SHpFrTYLKfVyBfAhVNA==", + "funding": [ + { + "type": "individual", + "url": "https://locize.com" + }, + { + "type": "individual", + "url": "https://locize.com/i18next.html" + }, + { + "type": "individual", + "url": "https://www.i18next.com/how-to/faq#i18next-is-awesome.-how-can-i-support-the-project" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/runtime": "^7.28.4" + }, + "peerDependencies": { + "typescript": "^5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/iconv-corefoundation": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/iconv-corefoundation/-/iconv-corefoundation-1.1.7.tgz", + "integrity": "sha512-T10qvkw0zz4wnm560lOEg0PovVqUXuOFhhHAkixw8/sycy7TJt7v/RrkEKEQnAw2viPSJu6iAkErxnzR0g8PpQ==", + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "dependencies": { + "cli-truncate": "^2.1.0", + "node-addon-api": "^1.6.3" + }, + "engines": { + "node": "^8.11.2 || >=10" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-in-the-middle": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-2.0.1.tgz", + "integrity": "sha512-bruMpJ7xz+9jwGzrwEhWgvRrlKRYCRDBrfU+ur3FcasYXLJDxTruJ//8g2Noj+QFyRBeqbpj8Bhn4Fbw6HjvhA==", + "license": "Apache-2.0", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } + }, + "node_modules/imurmurhash": 
{ + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/infer-owner": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "dev": true, + "license": "ISC" + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + "license": "MIT" + }, + "node_modules/internal-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", + "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ip-address": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz", + "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + 
"funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", + "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-async-function": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", + "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-boolean-object": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", + "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", + "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", + "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", + "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", + "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.4", + "generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-lambda": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", + "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", + "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + 
"resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", + "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", + "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", + "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", + "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", + "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isbinaryfile": { + "version": "5.0.7", + "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-5.0.7.tgz", + "integrity": "sha512-gnWD14Jh3FzS3CPhF0AxNOJ8CxqeblPTADzI38r0wt8ZyQl5edpy75myt08EG2oKvpyiqSqsx+Wkz9vtkbTqYQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/gjtorikian/" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/iterator.prototype": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", + "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "get-proto": "^1.0.0", + "has-symbols": "^1.1.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jake": { + "version": "10.9.4", + "resolved": "https://registry.npmjs.org/jake/-/jake-10.9.4.tgz", + "integrity": "sha512-wpHYzhxiVQL+IV05BLE2Xn34zW1S223hvjtqk0+gsPrwd/8JNLXJgZZM/iPFsYc1xyphF+6M6EvdE5E9MBGkDA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "async": "^3.2.6", + "filelist": "^1.0.4", + "picocolors": "^1.1.1" + }, + "bin": { + "jake": "bin/cli.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": 
"sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsdom": { + "version": "27.4.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.4.0.tgz", + "integrity": "sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@acemir/cssom": "^0.9.28", + "@asamuzakjp/dom-selector": "^6.7.6", + "@exodus/bytes": "^1.6.0", + "cssstyle": "^5.3.4", + "data-urls": "^6.0.0", + "decimal.js": "^10.6.0", + "html-encoding-sniffer": "^6.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.6", + "is-potential-custom-element-name": "^1.0.1", + "parse5": "^8.0.0", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^6.0.0", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^8.0.0", + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^15.1.0", + "ws": "^8.18.3", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + }, + "peerDependencies": { + "canvas": "^3.0.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-to-ts": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-3.1.1.tgz", + "integrity": "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "ts-algebra": "^2.0.0" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "dev": true, + "license": "ISC", + "optional": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": 
"sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", + "dev": true, + "license": "MIT", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/lazy-val": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/lazy-val/-/lazy-val-1.0.5.tgz", + "integrity": "sha512-0/BnGCCfyUMkBpeDgWihanIAF9JmZhHBgUhEqzvf+adhNGLoP6TaiI5oF8oyb3I45P+PcnrqihSf01M0l0G5+Q==", + "license": "MIT" + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz", + "integrity": "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.30.2", + "lightningcss-darwin-arm64": "1.30.2", + "lightningcss-darwin-x64": "1.30.2", + "lightningcss-freebsd-x64": "1.30.2", + "lightningcss-linux-arm-gnueabihf": "1.30.2", + "lightningcss-linux-arm64-gnu": "1.30.2", + "lightningcss-linux-arm64-musl": "1.30.2", + "lightningcss-linux-x64-gnu": "1.30.2", + "lightningcss-linux-x64-musl": "1.30.2", + "lightningcss-win32-arm64-msvc": "1.30.2", + "lightningcss-win32-x64-msvc": "1.30.2" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.30.2.tgz", + "integrity": "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + 
"node_modules/lightningcss-darwin-arm64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.2.tgz", + "integrity": "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.2.tgz", + "integrity": "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.2.tgz", + "integrity": "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.2.tgz", + "integrity": "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.2.tgz", + "integrity": "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.2.tgz", + "integrity": "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.2.tgz", + "integrity": 
"sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.2.tgz", + "integrity": "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.2.tgz", + "integrity": "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.30.2", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.2.tgz", + "integrity": "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lint-staged": { + "version": "16.2.7", + "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-16.2.7.tgz", + "integrity": "sha512-lDIj4RnYmK7/kXMya+qJsmkRFkGolciXjrsZ6PC25GdTfWOAWetR0ZbsNXRAj1EHHImRSalc+whZFg56F5DVow==", + "dev": true, + "license": "MIT", + "dependencies": { + "commander": "^14.0.2", + "listr2": "^9.0.5", + "micromatch": "^4.0.8", + "nano-spawn": "^2.0.0", + "pidtree": "^0.6.0", + "string-argv": "^0.3.2", + "yaml": "^2.8.1" + }, + "bin": { + "lint-staged": "bin/lint-staged.js" + }, + "engines": { + "node": ">=20.17" + }, + "funding": { + "url": "https://opencollective.com/lint-staged" + } + }, + "node_modules/lint-staged/node_modules/commander": { + "version": "14.0.2", + "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.2.tgz", + "integrity": "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20" + } + }, + "node_modules/listr2": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/listr2/-/listr2-9.0.5.tgz", + "integrity": "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==", + "dev": true, + "license": "MIT", + "dependencies": { + "cli-truncate": "^5.0.0", + "colorette": "^2.0.20", + "eventemitter3": "^5.0.1", + "log-update": "^6.1.0", + "rfdc": "^1.4.1", + "wrap-ansi": "^9.0.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/listr2/node_modules/ansi-styles": { + "version": "6.2.3", + 
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/listr2/node_modules/cli-truncate": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-5.1.1.tgz", + "integrity": "sha512-SroPvNHxUnk+vIW/dOSfNqdy1sPEFkrTk6TUtqLCnBlo3N7TNYYkzzN7uSD6+jVjrdO4+p8nH7JzH6cIvUem6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "slice-ansi": "^7.1.0", + "string-width": "^8.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/listr2/node_modules/is-fullwidth-code-point": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", + "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.3.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/listr2/node_modules/slice-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz", + "integrity": "sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "is-fullwidth-code-point": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/listr2/node_modules/string-width": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.1.0.tgz", + "integrity": "sha512-Kxl3KJGb/gxkaUMOjRsQ8IrXiGW75O4E3RPjFIINOVH8AMl2SQ/yWdTzWwF3FevIX9LcMAjJW+GRwAlAbTSXdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.3.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.escaperegexp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", + "integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw==", + "license": "MIT" + }, + "node_modules/lodash.isequal": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", + "integrity": 
"sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==", + "deprecated": "This package is deprecated. Use require('node:util').isDeepStrictEqual instead.", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/log-update/-/log-update-6.1.0.tgz", + "integrity": "sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^7.0.0", + "cli-cursor": "^5.0.0", + "slice-ansi": "^7.1.0", + "strip-ansi": "^7.1.0", + "wrap-ansi": "^9.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/log-update/node_modules/is-fullwidth-code-point": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", + "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.3.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-update/node_modules/slice-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz", + "integrity": "sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "is-fullwidth-code-point": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": 
"sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lowercase-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", + "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.562.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz", + "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "bin": { + "lz-string": "bin/bin.js" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/make-fetch-happen": { + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz", + "integrity": "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/agent": "^3.0.0", + "cacache": "^19.0.1", + "http-cache-semantics": "^4.1.1", + "minipass": "^7.0.2", + "minipass-fetch": "^4.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^1.0.0", + "proc-log": "^5.0.0", + "promise-retry": "^2.0.1", + "ssri": "^12.0.0" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/matcher": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", + "integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "escape-string-regexp": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + 
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", + "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + 
"type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub 
Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + 
"node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + 
"url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", + "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==", + "dev": true, + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-function": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz", + "integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz", + "integrity": 
"sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minipass-collect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz", + "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minipass-fetch": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-4.0.1.tgz", + "integrity": "sha512-j7U11C5HXigVuutxebFadoYBbd7VSdZWggSe64NVdvWNBqGAiXPL2QVCehjmw7lY1oF9gOllYbORh+hiNgfPgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^3.0.1" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/minipass-flush": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", + "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-flush/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-flush/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/minipass-pipeline": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", + "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-pipeline/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-pipeline/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/minipass-sized": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", + "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-sized/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-sized/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/minizlib": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz", + "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/module-details-from-path": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz", + "integrity": "sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==", + "license": "MIT" + }, + "node_modules/motion": { + "version": "12.23.26", + "resolved": "https://registry.npmjs.org/motion/-/motion-12.23.26.tgz", + "integrity": "sha512-Ll8XhVxY8LXMVYTCfme27WH2GjBrCIzY4+ndr5QKxsK+YwCtOi2B/oBi5jcIbik5doXuWT/4KKDOVAZJkeY5VQ==", + "license": "MIT", + "dependencies": { + "framer-motion": "^12.23.26", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@emotion/is-prop-valid": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/motion-dom": { + "version": "12.23.23", + "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.23.23.tgz", + "integrity": "sha512-n5yolOs0TQQBRUFImrRfs/+6X4p3Q4n1dUEqt/H58Vx7OW6RF+foWEgmTVDhIWJIMXOuNNL0apKH2S16en9eiA==", + "license": "MIT", + "dependencies": { + 
"motion-utils": "^12.23.6" + } + }, + "node_modules/motion-utils": { + "version": "12.23.6", + "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.23.6.tgz", + "integrity": "sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ==", + "license": "MIT" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nano-spawn": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/nano-spawn/-/nano-spawn-2.0.0.tgz", + "integrity": "sha512-tacvGzUY5o2D8CBh2rrwxyNojUsZNU2zjNTzKQrkgGJQTbGAfArVWXSKMBokBeeg6C7OLRGUEyoFlYbfeWQIqw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/nano-spawn?sponsor=1" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-abi": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-4.24.0.tgz", + "integrity": "sha512-u2EC1CeNe25uVtX3EZbdQ275c74zdZmmpzrHEQh2aIYqoVjlglfUpOX9YY85x1nlBydEKDVaSmMNhR7N82Qj8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.6.3" + }, + "engines": { + "node": ">=22.12.0" + } + }, + "node_modules/node-addon-api": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-1.7.2.tgz", + "integrity": "sha512-ibPK3iA+vaY1eEjESkQkM0BbCqFOaZMiXRTtdB0u7b4djtY6JnsjvPdUHVMg6xQt3B8fpTTWHI9A+ADjM9frzg==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/node-api-version": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/node-api-version/-/node-api-version-0.2.1.tgz", + "integrity": "sha512-2xP/IGGMmmSQpI1+O/k72jF/ykvZ89JeuKX3TLJAYPDVLUalrshrLHkeVcCCZqG/eEa635cr8IBYzgnDvM2O8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + } + }, + "node_modules/node-gyp": { + "version": "11.5.0", + "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-11.5.0.tgz", + "integrity": "sha512-ra7Kvlhxn5V9Slyus0ygMa2h+UqExPqUIkfk7Pc8QTLT956JLSy51uWFwHtIYy0vI8cB4BDhc/S03+880My/LQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^14.0.3", + "nopt": "^8.0.0", + "proc-log": 
"^5.0.0", + "semver": "^7.3.5", + "tar": "^7.4.3", + "tinyglobby": "^0.2.12", + "which": "^5.0.0" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/node-gyp/node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/node-gyp/node_modules/isexe": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16" + } + }, + "node_modules/node-gyp/node_modules/tar": { + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", + "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.1.0", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-gyp/node_modules/which": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/which/-/which-5.0.0.tgz", + "integrity": "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/node-gyp/node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nopt": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz", + "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "^3.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/normalize-url": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", + "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz", + "integrity": "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.values": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz", + "integrity": "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": 
{ + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", + "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-function": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ora/node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ora/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/own-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", + "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/p-cancelable": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz", + "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz", + "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", + "integrity": 
"sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pe-library": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/pe-library/-/pe-library-0.4.1.tgz", + "integrity": "sha512-eRWB5LBz7PpDu4PUlwT0PhnQfTQJlDDdPa35urV4Osrm0t0AqQFGn+UIkU3klZvwJ8KPO3VbBFsXquA6p6kqZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/jet2jet" + } + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", + "dev": true, + "license": "MIT" + }, + "node_modules/pg-int8": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", + "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", + "license": 
"ISC", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/pg-protocol": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.10.3.tgz", + "integrity": "sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==", + "license": "MIT" + }, + "node_modules/pg-types": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", + "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", + "license": "MIT", + "dependencies": { + "pg-int8": "1.0.1", + "postgres-array": "~2.0.0", + "postgres-bytea": "~1.0.0", + "postgres-date": "~1.0.4", + "postgres-interval": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pidtree": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/pidtree/-/pidtree-0.6.0.tgz", + "integrity": "sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==", + "dev": true, + "license": "MIT", + "bin": { + "pidtree": "bin/pidtree.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/playwright": { + "version": "1.57.0", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.57.0.tgz", + "integrity": "sha512-ilYQj1s8sr2ppEJ2YVadYBN0Mb3mdo9J0wQ+UuDhzYqURwSoW4n1Xs5vs7ORwgDGmyEh33tRMeS8KhdkMoLXQw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "playwright-core": "1.57.0" + }, + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "fsevents": "2.3.2" + } + }, + "node_modules/playwright-core": { + "version": "1.57.0", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.57.0.tgz", + "integrity": "sha512-agTcKlMw/mjBWOnD6kFZttAAGHgi/Nw0CZ2o6JqWSbMlI219lAFLZZCyqByTsvVAJq5XA5H8cA6PrvBRpBWEuQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "playwright-core": "cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/plist": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/plist/-/plist-3.1.0.tgz", + "integrity": "sha512-uysumyrvkUX0rX/dEVqt8gC3sTBzd4zoWfLeS29nb53imdaXVvLINYXTI2GNqzaMuvacNx4uJQ8+b3zXR0pkgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@xmldom/xmldom": "^0.8.8", + "base64-js": "^1.5.1", + "xmlbuilder": "^15.1.1" + }, + "engines": { + "node": ">=10.4.0" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", + "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/postgres-array": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", + "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/postgres-bytea": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.1.tgz", + "integrity": "sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-date": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", + "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-interval": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", + "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", + "license": "MIT", + "dependencies": { + "xtend": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postject": { + "version": "1.0.0-alpha.6", + "resolved": "https://registry.npmjs.org/postject/-/postject-1.0.0-alpha.6.tgz", + "integrity": "sha512-b9Eb8h2eVqNE8edvKdwqkrY6O7kAwmI8kcnBv1NScolYJbo59XUF0noFq+lxbC1yN20bmC0WBEbDC5H/7ASb0A==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "commander": "^9.4.0" + }, + "bin": { + "postject": "dist/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/postject/node_modules/commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": "^12.20.0 || >=14" + } + }, + 
"node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/proc-log": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz", + "integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/proper-lockfile": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/proper-lockfile/-/proper-lockfile-4.1.2.tgz", + "integrity": "sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA==", + "license": 
"MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "retry": "^0.12.0", + "signal-exit": "^3.0.2" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/pump": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", + "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz", + "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==", + "license": "MIT", + "peer": true, + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.3" + } + }, + "node_modules/react-i18next": { + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.0.tgz", + "integrity": "sha512-IMpPTyCTKxEj8klCrLKUTIUa8uYTd851+jcu2fJuUB9Agkk9Qq8asw4omyeHVnOXHrLgQJGTm5zTvn8HpaPiqw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.27.6", + "html-parse-stringify": "^3.0.1", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "i18next": ">= 25.6.2", + "react": ">= 16.8.0", + "typescript": "^5" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT" + }, + "node_modules/react-markdown": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz", + "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": 
"^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, + "node_modules/react-refresh": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz", + "integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.2.tgz", + "integrity": "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + "license": "MIT", + "dependencies": { + "react-style-singleton": "^2.2.2", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-resizable-panels": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-4.2.0.tgz", + "integrity": "sha512-X/WbnyT/bgx09KEGvtJvaTr3axRrcBGcJdELIoGXZipCxc2hPwFsH/pfpVgwNVq5LpQxF/E5pPXGTQdjBnidPw==", + "license": "MIT", + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + "license": "MIT", + "dependencies": { + "get-nonce": "^1.0.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/read-binary-file-arch": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/read-binary-file-arch/-/read-binary-file-arch-1.0.6.tgz", + "integrity": "sha512-BNg9EN3DD3GsDXX7Aa8O4p92sryjkmzYYgmgTAc6CA4uGLEDzFfxOxugu21akOxpcXHiEgsYkC6nPsQvLLLmEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.3.4" + }, + "bin": { + "read-binary-file-arch": "cli.js" + } + }, + "node_modules/readable-stream": { + 
"version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-5.0.0.tgz", + "integrity": "sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==", + "license": "MIT", + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", + "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + "which-builtin-type": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", + "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-in-the-middle": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-8.0.1.tgz", + "integrity": "sha512-QT7FVMXfWOYFbeRBF6nu+I6tr2Tf3u0q8RIEjNob/heKY/nh7drD/k7eeMFmSQgnTtCzLDcCu/XEnpW2wk4xCQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.3.5", + "module-details-from-path": "^1.0.3" + }, + "engines": { + "node": ">=9.3.0 || >=8.10.0 <9.0.0" + } + }, + "node_modules/resedit": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/resedit/-/resedit-1.7.2.tgz", + "integrity": "sha512-vHjcY2MlAITJhC0eRD/Vv8Vlgmu9Sd3LX9zZvtGzU5ZImdTN3+d6e/4mnTyV8vEbyf1sgNIrWxhWlrys52OkEA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pe-library": "^0.4.1" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/jet2jet" + } + }, + "node_modules/resolve": { + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": 
"4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/responselike": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.1.tgz", + "integrity": "sha512-4gl03wn3hj1HP3yzgdI7d3lCkF95F21Pz4BPGvKHinyQzALR5CapwC8yIi0Rh58DEMQ/SguC03wFj2k0M/mHhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "lowercase-keys": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/restore-cursor": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", + "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^7.0.0", + "signal-exit": "^4.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/restore-cursor/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/rfdc": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz", + "integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==", + "dev": true, + "license": "MIT" + }, + "node_modules/rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/roarr": { + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", + "integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==", + "dev": true, + "license": "BSD-3-Clause", + "optional": true, + "dependencies": { + "boolean": "^3.0.1", + "detect-node": "^2.0.4", + "globalthis": "^1.0.1", + "json-stringify-safe": "^5.0.1", + "semver-compare": "^1.0.0", + "sprintf-js": "^1.1.2" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/rollup": { + "version": "4.54.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz", + "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + 
"optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.54.0", + "@rollup/rollup-android-arm64": "4.54.0", + "@rollup/rollup-darwin-arm64": "4.54.0", + "@rollup/rollup-darwin-x64": "4.54.0", + "@rollup/rollup-freebsd-arm64": "4.54.0", + "@rollup/rollup-freebsd-x64": "4.54.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.54.0", + "@rollup/rollup-linux-arm-musleabihf": "4.54.0", + "@rollup/rollup-linux-arm64-gnu": "4.54.0", + "@rollup/rollup-linux-arm64-musl": "4.54.0", + "@rollup/rollup-linux-loong64-gnu": "4.54.0", + "@rollup/rollup-linux-ppc64-gnu": "4.54.0", + "@rollup/rollup-linux-riscv64-gnu": "4.54.0", + "@rollup/rollup-linux-riscv64-musl": "4.54.0", + "@rollup/rollup-linux-s390x-gnu": "4.54.0", + "@rollup/rollup-linux-x64-gnu": "4.54.0", + "@rollup/rollup-linux-x64-musl": "4.54.0", + "@rollup/rollup-openharmony-arm64": "4.54.0", + "@rollup/rollup-win32-arm64-msvc": "4.54.0", + "@rollup/rollup-win32-ia32-msvc": "4.54.0", + "@rollup/rollup-win32-x64-gnu": "4.54.0", + "@rollup/rollup-win32-x64-msvc": "4.54.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", + "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-push-apply": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", + "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/sanitize-filename": { + "version": "1.6.3", + "resolved": 
"https://registry.npmjs.org/sanitize-filename/-/sanitize-filename-1.6.3.tgz", + "integrity": "sha512-y/52Mcy7aw3gRm7IrcGDFx/bCk4AhRh2eI9luHOQM86nZsqwiRkkq2GekHXBBD+SmPidc8i2PqtYZl+pWJ8Oeg==", + "dev": true, + "license": "WTFPL OR ISC", + "dependencies": { + "truncate-utf8-bytes": "^1.0.0" + } + }, + "node_modules/sax": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.3.tgz", + "integrity": "sha512-yqYn1JhPczigF94DMS+shiDMjDowYO6y9+wB/4WgO0Y19jWYk0lQ4tuG5KI7kj4FTp1wxPj5IFfcrz/s1c3jjQ==", + "license": "BlueOak-1.0.0" + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-compare": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz", + "integrity": "sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==", + "dev": true, + "license": "MIT", + "optional": true + }, + "node_modules/serialize-error": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz", + "integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "type-fest": "^0.13.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-proto": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", + "integrity": 
"sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": 
"https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, + "node_modules/simple-update-notifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz", + "integrity": "sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/slice-ansi": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", + "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.8.7", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz", + "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ip-address": "^10.0.1", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": 
"sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/sprintf-js": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", + "dev": true, + "license": "BSD-3-Clause", + "optional": true + }, + "node_modules/ssri": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-12.0.0.tgz", + "integrity": "sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/stat-mode": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/stat-mode/-/stat-mode-1.0.0.tgz", + "integrity": "sha512-jH9EhtKIjuXZ2cWxmXS8ZP80XyC3iasQxMDV8jzhNJpfDb7VbQLVW4Wvsxz9QZvzV+G4YoSfBUVKDOyxLzi/sg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/stop-iteration-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", + "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "internal-slot": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-argv": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz", + "integrity": "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.6.19" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", + "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "regexp.prototype.flags": "^1.5.3", + "set-function-name": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.repeat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", + "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", + "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", + "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" 
+ } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": 
"https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/sumchecker": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/sumchecker/-/sumchecker-3.0.1.tgz", + "integrity": "sha512-MvjXzkz/BOfyVDkG0oFOtBxHX2u3gKbMHIF/dXblZsgD3BWOFLmHovIpZY7BykJdAjcqRCBi1WYBNdEC9yI7vg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "debug": "^4.1.0" + }, + "engines": { + "node": ">= 8.0" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tailwind-merge": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz", + "integrity": "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "4.1.18", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz", + "integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==", + "license": "MIT", + "peer": true + }, + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "dev": true, + "license": "ISC", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar/node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": 
"sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/tar/node_modules/fs-minipass/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/tar/node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/tar/node_modules/minizlib/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tar/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/temp": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/temp/-/temp-0.9.4.tgz", + "integrity": "sha512-yYrrsWnrXMcdsnu/7YMYAofM1ktpL5By7vZhf15CrXijWWrEYZks5AXBudalfSWJLlnen/QUJUB5aoB0kqZUGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "mkdirp": "^0.5.1", + "rimraf": "~2.6.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/temp-file": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/temp-file/-/temp-file-3.4.0.tgz", + "integrity": "sha512-C5tjlC/HCtVUOi3KWVokd4vHVViOmGjtLwIh4MuzPo/nMYTV/p1urt3RnMz2IWXDdKEGJH3k5+KPxtqRsUYGtg==", + "dev": true, + "license": "MIT", + "dependencies": { + "async-exit-hook": "^2.0.1", + "fs-extra": "^10.0.0" + } + }, + "node_modules/temp-file/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/temp-file/node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/temp-file/node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/temp/node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/tiny-async-pool": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/tiny-async-pool/-/tiny-async-pool-1.3.0.tgz", + "integrity": "sha512-01EAw5EDrcVrdgyCLgoSPvqznC0sVxDSVeiOz09FUpjh71G79VCqneOr+xvt7T1r76CF6ZZfPjHorN2+d+3mqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^5.5.0" + } + }, + "node_modules/tiny-async-pool/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/tiny-typed-emitter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/tiny-typed-emitter/-/tiny-typed-emitter-2.1.0.tgz", + "integrity": "sha512-qVtvMxeXbVej0cQWKqVSSAHmKZEHAvxdF8HEUBFWts8h+xEo5m/lEiPakuyZ3BnCBjOD8i24kzNOiOLLgsSxhA==", + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": 
"sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/tinyrainbow": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", + "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tldts": { + "version": "7.0.19", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz", + "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.19" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.19", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz", + "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==", + "dev": true, + "license": "MIT" + }, + "node_modules/tmp": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz", + "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.14" + } + }, + "node_modules/tmp-promise": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.3.tgz", + "integrity": "sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tmp": "^0.2.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz", + "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", + "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": 
"sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/truncate-utf8-bytes": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz", + "integrity": "sha512-95Pu1QXQvruGEhv62XCMO3Mm90GscOCClvrIUwCM0PYOXK3kaF3l3sIHxx71ThJfcbM2O5Au6SO3AWCSEfW4mQ==", + "dev": true, + "license": "WTFPL", + "dependencies": { + "utf8-byte-length": "^1.0.1" + } + }, + "node_modules/ts-algebra": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-2.0.0.tgz", + "integrity": "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==", + "license": "MIT" + }, + "node_modules/ts-api-utils": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.3.0.tgz", + "integrity": "sha512-6eg3Y9SF7SsAvGzRHQvvc1skDAhwI4YQ32ui1scxD1Ccr0G5qIIbUBT3pFTKX8kmWIQClHobtUdNuaBgwdfdWg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", + "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "optional": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", + "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", + "integrity": 
"sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", + "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "devOptional": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.51.0.tgz", + "integrity": "sha512-jh8ZuM5oEh2PSdyQG9YAEM1TCGuWenLSuSUhf/irbVUNW9O5FhbFVONviN2TgMTBnUmyHv7E56rYnfLZK6TkiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.51.0", + "@typescript-eslint/parser": "8.51.0", + "@typescript-eslint/typescript-estree": "8.51.0", + "@typescript-eslint/utils": "8.51.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/unbox-primitive": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", + "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "license": "MIT" + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-filename": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-4.0.0.tgz", + "integrity": "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^5.0.0" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/unique-slug": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-5.0.0.tgz", + "integrity": "sha512-9OdaqO5kwqR+1kVgHAhsp5vPNU0hnxRa26rBFNfNgM7M6pNtgzeBn3s/xbyCQL3dcjzOatcef6UUHpB/6MaETg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, + 
"node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + "license": "MIT", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/utf8-byte-length": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/utf8-byte-length/-/utf8-byte-length-1.0.5.tgz", + "integrity": "sha512-Xn0w3MtiQ6zoz2vFyUVruaCL53O/DwUvkEeOvj+uulMm0BkUGYWmBYVyElqZaSLhY6ZD0ulfU3aBra2aVT4xfA==", + "dev": true, + "license": "(WTFPL OR MIT)" + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/uuid": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-13.0.0.tgz", + "integrity": "sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==", + "funding": [ + "https://github.com/sponsors/broofa", + 
"https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist-node/bin/uuid" + } + }, + "node_modules/verror": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.1.tgz", + "integrity": "sha512-veufcmxri4e3XSrT0xwfUR7kguIkaxBeosDg00yDWhk49wdwkSUrvvsm7nc75e1PUyvIeZj6nS8VQRYz2/S4Xg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", + "integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/@esbuild/aix-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", + "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", + "integrity": 
"sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", + "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/android-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", + "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", + "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", + "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", + "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", + "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", + "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", + "integrity": 
"sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", + "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-loong64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", + "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-mips64el": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", + "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ppc64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", + "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-riscv64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", + "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-s390x": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", + "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", + "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", + "integrity": 
"sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/netbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", + "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", + "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", + "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", + "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/sunos-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", + "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-arm64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", + "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-ia32": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", + "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-x64": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", + "integrity": 
"sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/vite/node_modules/esbuild": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", + "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.2", + "@esbuild/android-arm": "0.27.2", + "@esbuild/android-arm64": "0.27.2", + "@esbuild/android-x64": "0.27.2", + "@esbuild/darwin-arm64": "0.27.2", + "@esbuild/darwin-x64": "0.27.2", + "@esbuild/freebsd-arm64": "0.27.2", + "@esbuild/freebsd-x64": "0.27.2", + "@esbuild/linux-arm": "0.27.2", + "@esbuild/linux-arm64": "0.27.2", + "@esbuild/linux-ia32": "0.27.2", + "@esbuild/linux-loong64": "0.27.2", + "@esbuild/linux-mips64el": "0.27.2", + "@esbuild/linux-ppc64": "0.27.2", + "@esbuild/linux-riscv64": "0.27.2", + "@esbuild/linux-s390x": "0.27.2", + "@esbuild/linux-x64": "0.27.2", + "@esbuild/netbsd-arm64": "0.27.2", + "@esbuild/netbsd-x64": "0.27.2", + "@esbuild/openbsd-arm64": "0.27.2", + "@esbuild/openbsd-x64": "0.27.2", + "@esbuild/openharmony-arm64": "0.27.2", + "@esbuild/sunos-x64": "0.27.2", + "@esbuild/win32-arm64": "0.27.2", + "@esbuild/win32-ia32": "0.27.2", + "@esbuild/win32-x64": "0.27.2" + } + }, + "node_modules/vite/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/vite/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/vitest": { + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz", + "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.0.16", + "@vitest/mocker": "4.0.16", + "@vitest/pretty-format": "4.0.16", + "@vitest/runner": "4.0.16", + "@vitest/snapshot": "4.0.16", + "@vitest/spy": "4.0.16", + "@vitest/utils": "4.0.16", + "es-module-lexer": "^1.7.0", + "expect-type": "^1.2.2", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + 
"pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^3.10.0", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.0.16", + "@vitest/browser-preview": "4.0.16", + "@vitest/browser-webdriverio": "4.0.16", + "@vitest/ui": "4.0.16", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vitest/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/void-elements": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", + "integrity": "sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "license": "MIT", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/webidl-conversions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz", + "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=20" + } + }, + "node_modules/whatwg-mimetype": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-url": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz", + "integrity": 
"sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^6.0.0", + "webidl-conversions": "^8.0.0" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", + "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", + "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": 
"https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/xmlbuilder": { + "version": "15.1.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-15.1.1.tgz", + "integrity": "sha512-yMqGBqtXyeN1e3TGYvgNgDVZ3j84W4cwkOXQswghol6APgZWaff9lnbvN7MHYJOiXsvGPXtjTYJEiC9J2wv9Eg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yaml": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", + "dev": true, + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yauzl": { + 
"version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.2.tgz", + "integrity": "sha512-b8L8yn4rIVfiXyHAmnr52/ZEpDumlT0bmxiq3Ws1ybrinhflGpt12Hvv54kYnEsGPRs6o/Ka3/ppA2OWY21IVg==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-validation-error": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", + "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + } + }, + "node_modules/zustand": { + "version": "5.0.9", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.9.tgz", + "integrity": "sha512-ALBtUj0AfjJt3uNRQoL1tL2tMvj6Gp/6e39dnfT6uzpelGru8v1tPOGBzayOWbPJvujM8JojDk3E1LxeFisBNg==", + "license": "MIT", + "engines": { + "node": ">=12.20.0" + }, + "peerDependencies": { + "@types/react": ">=18.0.0", + "immer": ">=9.0.6", + "react": ">=18.0.0", + "use-sync-external-store": ">=1.2.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + }, + "use-sync-external-store": { + "optional": true + } + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } } } } diff --git a/package.json b/package.json index 10e3e32706..07876f8fd9 100644 --- a/package.json +++ b/package.json @@ -4,6 +4,10 @@ "description": "Autonomous multi-agent coding framework powered by Claude AI", "license": "AGPL-3.0", "author": "Auto Claude Team", + "workspaces": [ + "apps/*", + "libs/*" + ], "scripts": { "install:backend": "node scripts/install-backend.js", "install:frontend": "cd apps/frontend && npm install", @@ -36,5 +40,8 @@ "coding", "agents", "electron" - ] + ], + "devDependencies": { + "jsdom": "^27.4.0" + } } diff --git a/scripts/bump-version.js b/scripts/bump-version.js index 6297d580db..00f6a0d7a3 100644 --- a/scripts/bump-version.js +++ b/scripts/bump-version.js @@ -149,6 +149,36 @@ function updateBackendInit(newVersion) { return true; } +// Check if CHANGELOG.md has an entry for the version +function checkChangelogEntry(version) { + const changelogPath = path.join(__dirname, '..', 'CHANGELOG.md'); + + if (!fs.existsSync(changelogPath)) { + warning('CHANGELOG.md not found - you will need to create it before releasing'); + return false; + } + + const 
content = fs.readFileSync(changelogPath, 'utf8'); + + // Look for "## X.Y.Z" or "## X.Y.Z -" header using string matching + // This avoids regex injection concerns from user-provided version strings + const lines = content.split('\n'); + const versionHeaderPrefix = `## ${version}`; + + for (const line of lines) { + // Check if line starts with "## X.Y.Z" followed by whitespace, dash, or end of line + if (line.startsWith(versionHeaderPrefix)) { + const afterVersion = line.slice(versionHeaderPrefix.length); + // Valid if nothing follows, or whitespace/dash follows + if (afterVersion === '' || afterVersion[0] === ' ' || afterVersion[0] === '-' || afterVersion[0] === '\t') { + return true; + } + } + } + + return false; +} + // Main function function main() { const bumpType = process.argv[2]; @@ -198,7 +228,35 @@ function main() { // after the GitHub release is successfully published. This prevents version // mismatches where README shows a version that doesn't exist yet. - // 6. Create git commit + // 6. Check if CHANGELOG.md has entry for this version + info('Checking CHANGELOG.md...'); + const hasChangelogEntry = checkChangelogEntry(newVersion); + + if (hasChangelogEntry) { + success(`CHANGELOG.md already has entry for ${newVersion}`); + } else { + log(''); + warning('โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•'); + warning(' CHANGELOG.md does not have an entry for version ' + newVersion); + warning('โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•'); + warning(''); + warning(' The release workflow will FAIL if CHANGELOG.md is not updated!'); + warning(''); + warning(' Please add an entry to CHANGELOG.md before creating your PR:'); + warning(''); + log(` ## ${newVersion} - Your Release Title`, colors.cyan); + log('', colors.cyan); + log(' ### โœจ New Features', colors.cyan); + log(' - Feature description', colors.cyan); + log('', colors.cyan); + log(' ### ๐Ÿ› Bug Fixes', colors.cyan); + log(' - Fix description', colors.cyan); + warning(''); + warning('โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•'); + log(''); + } + + // 7. Create git commit info('Creating git commit...'); exec('git add apps/frontend/package.json package.json apps/backend/__init__.py'); exec(`git commit -m "chore: bump version to ${newVersion}"`); @@ -208,18 +266,28 @@ function main() { // when this commit is merged to main, ensuring releases only happen after // successful builds. - // 7. Instructions + // 8. Instructions log('\n๐Ÿ“‹ Next steps:', colors.yellow); - log(` 1. Review the changes: git log -1`, colors.yellow); - log(` 2. Push to your branch: git push origin `, colors.yellow); - log(` 3. Create PR to main (or merge develop โ†’ main)`, colors.yellow); - log(` 4. When merged, GitHub Actions will automatically:`, colors.yellow); + if (!hasChangelogEntry) { + log(` 1. UPDATE CHANGELOG.md with release notes for ${newVersion}`, colors.red); + log(` 2. Commit the changelog: git add CHANGELOG.md && git commit --amend --no-edit`, colors.yellow); + log(` 3. 
Push to your branch: git push origin `, colors.yellow); + } else { + log(` 1. Review the changes: git log -1`, colors.yellow); + log(` 2. Push to your branch: git push origin `, colors.yellow); + } + log(` ${hasChangelogEntry ? '3' : '4'}. Create PR to main (or merge develop โ†’ main)`, colors.yellow); + log(` ${hasChangelogEntry ? '4' : '5'}. When merged, GitHub Actions will automatically:`, colors.yellow); + log(` - Validate CHANGELOG.md has entry for v${newVersion}`, colors.yellow); log(` - Create tag v${newVersion}`, colors.yellow); log(` - Build binaries for all platforms`, colors.yellow); - log(` - Create GitHub release with changelog`, colors.yellow); + log(` - Create GitHub release with changelog from CHANGELOG.md`, colors.yellow); log(` - Update README with new version\n`, colors.yellow); warning('Note: The commit has been created locally but NOT pushed.'); + if (!hasChangelogEntry) { + warning('IMPORTANT: Update CHANGELOG.md before pushing or the release will fail!'); + } info('Tags are created automatically by GitHub Actions when merged to main.'); log('\nโœจ Version bump complete!\n', colors.green); diff --git a/scripts/install-backend.js b/scripts/install-backend.js index a90372b7ca..40d3a4fc3f 100644 --- a/scripts/install-backend.js +++ b/scripts/install-backend.js @@ -27,10 +27,11 @@ function run(cmd, options = {}) { } // Find Python 3.12+ +// Prefer 3.12 first since it has the most stable wheel support for native packages function findPython() { const candidates = isWindows - ? ['py -3.14', 'py -3.13', 'py -3.12', 'python3.14', 'python3.13', 'python3.12', 'python3', 'python'] - : ['python3.14', 'python3.13', 'python3.12', 'python3', 'python']; + ? ['py -3.12', 'py -3.13', 'py -3.14', 'python3.12', 'python3.13', 'python3.14', 'python3', 'python'] + : ['python3.12', 'python3.13', 'python3.14', 'python3', 'python']; for (const cmd of candidates) { try { @@ -102,6 +103,29 @@ async function main() { process.exit(1); } + // Create .env file from .env.example if it doesn't exist + const envPath = path.join(backendDir, '.env'); + const envExamplePath = path.join(backendDir, '.env.example'); + + if (fs.existsSync(envPath)) { + console.log('\nโœ“ .env file already exists'); + } else if (fs.existsSync(envExamplePath)) { + console.log('\nCreating .env file from .env.example...'); + try { + fs.copyFileSync(envExamplePath, envPath); + console.log('โœ“ Created .env file'); + console.log(' Please configure it with your credentials:'); + console.log(` - Run: claude setup-token`); + console.log(` - Or edit: ${envPath}`); + } catch (error) { + console.warn('Warning: Could not create .env file:', error.message); + console.warn('You will need to manually copy .env.example to .env'); + } + } else { + console.warn('\nWarning: .env.example not found. Cannot auto-create .env file.'); + console.warn('Please create a .env file manually if your configuration requires it.'); + } + console.log('\nBackend installation complete!'); console.log(`Virtual environment: ${venvDir}`); } diff --git a/shared_docs/SECURITY_COMMANDS.md b/shared_docs/SECURITY_COMMANDS.md new file mode 100644 index 0000000000..1cade74425 --- /dev/null +++ b/shared_docs/SECURITY_COMMANDS.md @@ -0,0 +1,154 @@ +# Security Commands Configuration + +Auto Claude uses a dynamic security system that controls which shell commands the AI agent can execute. This prevents potentially dangerous operations while allowing legitimate development commands. 
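+
+The sketch below is a minimal, illustrative model of the validation flow described in the next section. It is **not** the actual implementation (the real detection and validation logic lives under `apps/backend/project/`), and the function and variable names here are invented for the example.
+
+```python
+from pathlib import Path
+
+# Commands that are always allowed, regardless of the detected stack.
+BASE_COMMANDS = {"ls", "cat", "grep", "git", "echo"}
+
+
+def load_allowlist(project_root: Path) -> set[str]:
+    """Read .auto-claude-allowlist, ignoring comments and blank lines."""
+    allowlist_file = project_root / ".auto-claude-allowlist"
+    if not allowlist_file.exists():
+        return set()
+    lines = allowlist_file.read_text(encoding="utf-8").splitlines()
+    return {
+        line.strip()
+        for line in lines
+        if line.strip() and not line.strip().startswith("#")
+    }
+
+
+def is_command_allowed(command: str, stack_commands: set[str], project_root: Path) -> bool:
+    """A command passes if its base name is a base command, a detected
+    stack command, or an entry in the custom allowlist."""
+    parts = command.split()
+    if not parts:
+        return False
+    # Compare the base command name only (e.g. "cargo", not "cargo build").
+    base = parts[0]
+    return (
+        base in BASE_COMMANDS
+        or base in stack_commands
+        or base in load_allowlist(project_root)
+    )
+```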
+ +## How It Works + +```text +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Command Validation โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ 1. Base Commands (always allowed) โ”‚ +โ”‚ โ””โ”€โ”€ ls, cat, grep, git, echo, etc. โ”‚ +โ”‚ โ”‚ +โ”‚ 2. Auto-Detected Stack Commands โ”‚ +โ”‚ โ””โ”€โ”€ Analyzer detects Cargo.toml โ†’ adds cargo, rustc โ”‚ +โ”‚ โ””โ”€โ”€ Analyzer detects package.json โ†’ adds npm, node โ”‚ +โ”‚ โ”‚ +โ”‚ 3. Custom Allowlist (manual additions) โ”‚ +โ”‚ โ””โ”€โ”€ .auto-claude-allowlist file โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Automatic Detection + +When you start a task, Auto Claude analyzes your project and automatically allows commands based on detected technologies: + +| Detected File | Commands Added | +|---------------|----------------| +| `Cargo.toml` | `cargo`, `rustc`, `rustup`, `rustfmt`, `cargo-clippy`, etc. | +| `package.json` | `npm`, `node`, `npx` | +| `yarn.lock` | `yarn` | +| `pnpm-lock.yaml` | `pnpm` | +| `pyproject.toml` | `python`, `pip`, `poetry`, `uv` | +| `go.mod` | `go` | +| `*.csproj` / `*.sln` | `dotnet` | +| `pubspec.yaml` | `dart`, `flutter`, `pub` | +| `Dockerfile` | `docker` | +| `docker-compose.yml` | `docker-compose` | +| `Makefile` | `make` | + +The full detection logic is in `apps/backend/project/stack_detector.py`. + +### Generated Profile + +The analyzer saves its results to `.auto-claude-security.json` in your project root: + +```json +{ + "base_commands": ["ls", "cat", "grep", "..."], + "stack_commands": ["cargo", "rustc", "rustup"], + "detected_stack": { + "languages": ["rust"], + "package_managers": ["cargo"], + "frameworks": [], + "databases": [] + }, + "project_hash": "abc123...", + "created_at": "2024-01-15T10:30:00" +} +``` + +This file is auto-generated. Don't edit it manually - it will be overwritten. + +## Custom Allowlist + +For commands that aren't auto-detected, create a `.auto-claude-allowlist` file in your project root: + +```text +# .auto-claude-allowlist +# One command per line, no comments on same line + +# Custom build tools +bazel +buck + +# Project-specific scripts +./scripts/deploy.sh + +# Additional tools +ansible +terraform +``` + +### When to Use the Allowlist + +Use `.auto-claude-allowlist` when: + +- Your project uses uncommon build tools (Bazel, Buck, Pants, etc.) +- You have custom scripts that need to be executable +- Auto-detection doesn't recognize your stack +- You're using bleeding-edge tools not yet in the detection system + +### Format + +- One command per line +- Lines starting with `#` are ignored +- Empty lines are ignored +- Use the base command name only (e.g., `cargo`, not `cargo build`) + +## Troubleshooting + +### "Command X is not allowed" + +1. **Check if it should be auto-detected:** + - Does your project have the expected config file? (e.g., `Cargo.toml` for Rust) + - Run in project root, not a subdirectory + +2. **Add to allowlist:** + + ```bash + echo "your-command" >> .auto-claude-allowlist + ``` + +3. 
**Force re-analysis** (if detection seems wrong): + - Delete `.auto-claude-security.json` + - Restart the task + +### Allowlist Changes Not Taking Effect + +The security profile cache updates automatically when: +- `.auto-claude-allowlist` is modified (mtime changes) +- `.auto-claude-security.json` is modified + +No restart required - changes apply on the next command. + +### Worktree Mode + +When using isolated worktrees, security files are automatically copied from your main project on each worktree setup. + +**Important:** Unlike environment files (which are only copied if missing), security files **always overwrite** existing files in the worktree. This ensures the worktree uses the same security rules as the main project, preventing security bypasses through stale configurations. + +This means: +- Changes to allowlist in the main project are reflected in new worktrees +- You cannot have different security rules per worktree (by design) +- If you need to test with different commands, modify the main project's allowlist + +## Security Considerations + +The allowlist system exists to prevent: +- Accidental `rm -rf /` or similar destructive commands +- Execution of unknown binaries +- Network operations with unrestricted tools + +Only add commands you trust and understand. + +## Adding Support for New Technologies + +If you're using a technology that should be auto-detected, consider contributing: + +1. Add detection logic to `apps/backend/project/stack_detector.py` +2. Add commands to `apps/backend/project/command_registry/languages.py` +3. Submit a PR! + +See existing detectors for examples. diff --git a/tests/test_finding_validation.py b/tests/test_finding_validation.py index f01b96011f..edf6b77975 100644 --- a/tests/test_finding_validation.py +++ b/tests/test_finding_validation.py @@ -4,6 +4,10 @@ Tests the finding-validator agent integration and FindingValidationResult models. This system prevents false positives from persisting by re-investigating unresolved findings. + +NOTE: The validation system has been updated to use EVIDENCE-BASED validation +instead of confidence scores. The key field is now `evidence_verified_in_file` +which is a boolean indicating whether the code evidence was found at the specified location. 
""" import sys @@ -32,10 +36,8 @@ ) from models import ( PRReviewFinding, - PRReviewResult, ReviewSeverity, ReviewCategory, - MergeVerdict, ) @@ -55,12 +57,12 @@ def test_valid_confirmed_valid(self): code_evidence="const query = `SELECT * FROM users WHERE id = ${userId}`;", line_range=(45, 45), explanation="SQL injection is present - user input is concatenated directly into the query.", - confidence=0.92, + evidence_verified_in_file=True, ) assert result.finding_id == "SEC-001" assert result.validation_status == "confirmed_valid" assert "SELECT" in result.code_evidence - assert result.confidence == 0.92 + assert result.evidence_verified_in_file is True def test_valid_dismissed_false_positive(self): """Test creating a dismissed_false_positive validation result.""" @@ -70,10 +72,10 @@ def test_valid_dismissed_false_positive(self): code_evidence="const sanitized = DOMPurify.sanitize(data);", line_range=(23, 26), explanation="Original finding claimed XSS but code uses DOMPurify.sanitize() for protection.", - confidence=0.88, + evidence_verified_in_file=True, ) assert result.validation_status == "dismissed_false_positive" - assert result.confidence == 0.88 + assert result.evidence_verified_in_file is True def test_valid_needs_human_review(self): """Test creating a needs_human_review validation result.""" @@ -83,10 +85,23 @@ def test_valid_needs_human_review(self): code_evidence="async function handleRequest(req) { ... }", line_range=(100, 150), explanation="Race condition claim requires runtime analysis to verify.", - confidence=0.45, + evidence_verified_in_file=True, ) assert result.validation_status == "needs_human_review" - assert result.confidence == 0.45 + assert result.evidence_verified_in_file is True + + def test_hallucinated_finding_not_verified(self): + """Test creating a result where evidence was not verified (hallucinated finding).""" + result = FindingValidationResult( + finding_id="HALLUC-001", + validation_status="dismissed_false_positive", + code_evidence="// Line 710 does not exist - file only has 600 lines", + line_range=(600, 600), + explanation="Original finding cited line 710 but file only has 600 lines. 
Hallucinated finding.", + evidence_verified_in_file=False, + ) + assert result.validation_status == "dismissed_false_positive" + assert result.evidence_verified_in_file is False def test_code_evidence_required(self): """Test that code_evidence cannot be empty.""" @@ -97,7 +112,7 @@ def test_code_evidence_required(self): code_evidence="", # Empty string should fail line_range=(45, 45), explanation="This is a detailed explanation of the issue.", - confidence=0.92, + evidence_verified_in_file=True, ) errors = exc_info.value.errors() assert any("code_evidence" in str(e) for e in errors) @@ -111,34 +126,24 @@ def test_explanation_min_length(self): code_evidence="const x = 1;", line_range=(45, 45), explanation="Too short", # Less than 20 chars - confidence=0.92, + evidence_verified_in_file=True, ) errors = exc_info.value.errors() assert any("explanation" in str(e) for e in errors) - def test_confidence_normalized_from_percentage(self): - """Test that confidence 0-100 is normalized to 0.0-1.0.""" - result = FindingValidationResult( - finding_id="SEC-001", - validation_status="confirmed_valid", - code_evidence="const query = `SELECT * FROM users`;", - line_range=(45, 45), - explanation="SQL injection vulnerability found in the query construction.", - confidence=85, # Percentage value - ) - assert result.confidence == 0.85 - - def test_confidence_range_validation(self): - """Test that confidence must be between 0.0 and 1.0 after normalization.""" - with pytest.raises(ValidationError): + def test_evidence_verified_required(self): + """Test that evidence_verified_in_file is required.""" + with pytest.raises(ValidationError) as exc_info: FindingValidationResult( finding_id="SEC-001", validation_status="confirmed_valid", - code_evidence="const x = 1;", + code_evidence="const query = `SELECT * FROM users`;", line_range=(45, 45), - explanation="This is a detailed explanation of the issue.", - confidence=150, # Will normalize to 1.5, which is out of range + explanation="SQL injection vulnerability found in the query construction.", + # Missing evidence_verified_in_file ) + errors = exc_info.value.errors() + assert any("evidence_verified_in_file" in str(e) for e in errors) def test_invalid_validation_status(self): """Test that invalid validation_status values are rejected.""" @@ -149,7 +154,7 @@ def test_invalid_validation_status(self): code_evidence="const x = 1;", line_range=(45, 45), explanation="This is a detailed explanation of the issue.", - confidence=0.92, + evidence_verified_in_file=True, ) @@ -166,7 +171,7 @@ def test_valid_response_with_multiple_validations(self): code_evidence="const query = `SELECT * FROM users`;", line_range=(45, 45), explanation="SQL injection confirmed in this query.", - confidence=0.92, + evidence_verified_in_file=True, ), FindingValidationResult( finding_id="QUAL-002", @@ -174,7 +179,7 @@ def test_valid_response_with_multiple_validations(self): code_evidence="const sanitized = DOMPurify.sanitize(data);", line_range=(23, 26), explanation="Code uses DOMPurify so XSS claim is false.", - confidence=0.88, + evidence_verified_in_file=True, ), ], summary="1 finding confirmed valid, 1 dismissed as false positive", @@ -197,7 +202,6 @@ def test_response_includes_finding_validations(self): ResolutionVerification( finding_id="SEC-001", status="unresolved", - confidence=0.85, evidence="File was not modified", ) ], @@ -208,7 +212,7 @@ def test_response_includes_finding_validations(self): code_evidence="const query = `SELECT * FROM users`;", line_range=(45, 45), explanation="SQL 
injection confirmed in this query.", - confidence=0.92, + evidence_verified_in_file=True, ) ], new_findings=[], @@ -231,7 +235,6 @@ def test_response_with_dismissed_findings(self): ResolutionVerification( finding_id="SEC-001", status="unresolved", - confidence=0.50, evidence="Line wasn't changed but need to verify", ) ], @@ -242,7 +245,7 @@ def test_response_with_dismissed_findings(self): code_evidence="const query = db.prepare('SELECT * FROM users WHERE id = ?').get(userId);", line_range=(45, 48), explanation="Original review misread - using parameterized query.", - confidence=0.95, + evidence_verified_in_file=True, ) ], new_findings=[], @@ -275,11 +278,10 @@ def test_finding_with_validation_fields(self): line=42, validation_status="confirmed_valid", validation_evidence="const query = `SELECT * FROM users`;", - validation_confidence=0.92, validation_explanation="SQL injection confirmed in the query.", ) assert finding.validation_status == "confirmed_valid" - assert finding.validation_confidence == 0.92 + assert finding.validation_evidence is not None def test_finding_without_validation_fields(self): """Test that validation fields are optional.""" @@ -294,7 +296,6 @@ def test_finding_without_validation_fields(self): ) assert finding.validation_status is None assert finding.validation_evidence is None - assert finding.validation_confidence is None assert finding.validation_explanation is None def test_finding_to_dict_includes_validation(self): @@ -309,13 +310,11 @@ def test_finding_to_dict_includes_validation(self): line=42, validation_status="confirmed_valid", validation_evidence="const query = ...;", - validation_confidence=0.92, validation_explanation="Issue confirmed.", ) data = finding.to_dict() assert data["validation_status"] == "confirmed_valid" assert data["validation_evidence"] == "const query = ...;" - assert data["validation_confidence"] == 0.92 assert data["validation_explanation"] == "Issue confirmed." 
def test_finding_from_dict_with_validation(self): @@ -330,12 +329,10 @@ def test_finding_from_dict_with_validation(self): "line": 42, "validation_status": "dismissed_false_positive", "validation_evidence": "parameterized query used", - "validation_confidence": 0.88, "validation_explanation": "False positive - using prepared statements.", } finding = PRReviewFinding.from_dict(data) assert finding.validation_status == "dismissed_false_positive" - assert finding.validation_confidence == 0.88 # ============================================================================ @@ -365,7 +362,7 @@ def test_validation_summary_format(self): code_evidence="const query = `SELECT * FROM users`;", line_range=(45, 45), explanation="SQL injection confirmed in this query construction.", - confidence=0.92, + evidence_verified_in_file=True, ), FindingValidationResult( finding_id="QUAL-002", @@ -373,7 +370,7 @@ def test_validation_summary_format(self): code_evidence="const sanitized = DOMPurify.sanitize(data);", line_range=(23, 26), explanation="Original XSS claim was incorrect - uses DOMPurify.", - confidence=0.88, + evidence_verified_in_file=True, ), ], new_findings=[], @@ -409,6 +406,6 @@ def test_validation_status_enum_values(self): code_evidence="const x = 1;", line_range=(1, 1), explanation="This is a valid explanation for the finding status.", - confidence=0.85, + evidence_verified_in_file=True, ) assert result.validation_status == status diff --git a/tests/test_github_pr_review.py b/tests/test_github_pr_review.py index a602500a86..741d69bc36 100644 --- a/tests/test_github_pr_review.py +++ b/tests/test_github_pr_review.py @@ -101,13 +101,11 @@ def mock_bot_detector(tmp_path): class TestPRReviewResult: """Test PRReviewResult model.""" - def test_save_and_load(self, temp_github_dir, sample_review_result): + @pytest.mark.asyncio + async def test_save_and_load(self, temp_github_dir, sample_review_result): """Test saving and loading review result.""" # Save - import asyncio - asyncio.get_event_loop().run_until_complete( - sample_review_result.save(temp_github_dir) - ) + await sample_review_result.save(temp_github_dir) # Verify file exists review_file = temp_github_dir / "pr" / f"review_{sample_review_result.pr_number}.json" @@ -227,6 +225,54 @@ def test_context_with_error(self, sample_review_result): assert context.error is not None assert "Failed to compare commits" in context.error + def test_context_rebase_detected_files_changed_no_commits(self, sample_review_result): + """Test follow-up context when PR was rebased (files changed but no trackable commits). + + After a rebase/force-push, commit SHAs are rewritten so we can't identify "new" commits. + However, blob SHA comparison can still identify which files actually changed content. + The follow-up review should proceed based on file changes, not skip the review. 
+ """ + context = FollowupReviewContext( + pr_number=123, + previous_review=sample_review_result, + previous_commit_sha="abc123", # This SHA no longer exists in PR after rebase + current_commit_sha="xyz789", + commits_since_review=[], # Empty after rebase - can't determine "new" commits + files_changed_since_review=["src/db.py", "src/api.py"], # But blob comparison found changes + diff_since_review="--- a/src/db.py\n+++ b/src/db.py\n@@ -1,3 +1,3 @@\n-old\n+new", + ) + + # Verify context reflects rebase scenario + assert context.pr_number == 123 + assert len(context.commits_since_review) == 0 # No trackable commits + assert len(context.files_changed_since_review) == 2 # But files did change + assert context.error is None + + # The key assertion: this context should NOT be treated as "no changes" + # The orchestrator should check both commits AND files + has_changes = bool(context.commits_since_review) or bool( + context.files_changed_since_review + ) + assert has_changes is True, "Rebase with file changes should be treated as having changes" + + def test_context_truly_no_changes(self, sample_review_result): + """Test follow-up context when there are truly no changes (same SHA, no files).""" + context = FollowupReviewContext( + pr_number=123, + previous_review=sample_review_result, + previous_commit_sha="abc123", + current_commit_sha="abc123", # Same SHA + commits_since_review=[], + files_changed_since_review=[], # No file changes either + diff_since_review="", + ) + + # This should be treated as no changes + has_changes = bool(context.commits_since_review) or bool( + context.files_changed_since_review + ) + assert has_changes is False, "No commits and no file changes means no changes" + # ============================================================================ # Bot Detection Integration Tests @@ -289,14 +335,11 @@ def test_new_commit_allows_review(self, mock_bot_detector): class TestOrchestratorSkipLogic: """Test orchestrator behavior when bot detection skips.""" - def test_skip_returns_existing_review(self, temp_github_dir, sample_review_result): + @pytest.mark.asyncio + async def test_skip_returns_existing_review(self, temp_github_dir, sample_review_result): """Test that skipping 'Already reviewed' returns existing review.""" - import asyncio - # Save existing review - asyncio.get_event_loop().run_until_complete( - sample_review_result.save(temp_github_dir) - ) + await sample_review_result.save(temp_github_dir) # Simulate the orchestrator logic for "Already reviewed" skip skip_reason = "Already reviewed commit abc123" @@ -418,19 +461,16 @@ def test_has_posted_findings_flag(self, sample_review_result): assert sample_review_result.has_posted_findings is True assert len(sample_review_result.posted_finding_ids) == 1 - def test_posted_findings_serialization(self, temp_github_dir, sample_review_result): + @pytest.mark.asyncio + async def test_posted_findings_serialization(self, temp_github_dir, sample_review_result): """Test that posted findings are serialized correctly.""" - import asyncio - # Set posted findings sample_review_result.has_posted_findings = True sample_review_result.posted_finding_ids = ["finding-001"] sample_review_result.posted_at = "2025-01-01T10:00:00" # Save - asyncio.get_event_loop().run_until_complete( - sample_review_result.save(temp_github_dir) - ) + await sample_review_result.save(temp_github_dir) # Load and verify loaded = PRReviewResult.load(temp_github_dir, sample_review_result.pr_number) diff --git a/tests/test_graphiti.py b/tests/test_graphiti.py index 
6243e8330d..04646bccf7 100644 --- a/tests/test_graphiti.py +++ b/tests/test_graphiti.py @@ -54,6 +54,7 @@ def test_status_when_disabled(self): assert status["available"] is False assert "not set" in status["reason"].lower() + @pytest.mark.skip(reason="Environment-dependent test - fails when OPENAI_API_KEY is set") def test_status_when_missing_openai_key(self): """Returns correct status when OPENAI_API_KEY missing. diff --git a/tests/test_pr_worktree_manager.py b/tests/test_pr_worktree_manager.py new file mode 100644 index 0000000000..9e3cf1886c --- /dev/null +++ b/tests/test_pr_worktree_manager.py @@ -0,0 +1,288 @@ +""" +Tests for PR Worktree Manager +============================== + +Tests the worktree lifecycle management including cleanup policies. +""" + +import os +import shutil +import subprocess +import tempfile +import time +from pathlib import Path + +import pytest + +# Import the module to test - use direct path to avoid package imports +import sys +import importlib.util + +backend_path = Path(__file__).parent.parent / "apps" / "backend" +module_path = backend_path / "runners" / "github" / "services" / "pr_worktree_manager.py" + +# Load module directly without importing parent packages +spec = importlib.util.spec_from_file_location("pr_worktree_manager", module_path) +pr_worktree_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(pr_worktree_module) + +PRWorktreeManager = pr_worktree_module.PRWorktreeManager + + +@pytest.fixture +def temp_git_repo(): + """Create a temporary git repository with remote origin for testing.""" + with tempfile.TemporaryDirectory() as tmpdir: + # Create a bare repo to act as "origin" + origin_dir = Path(tmpdir) / "origin.git" + origin_dir.mkdir() + subprocess.run( + ["git", "init", "--bare"], cwd=origin_dir, check=True, capture_output=True + ) + + # Create the working repo + repo_dir = Path(tmpdir) / "test_repo" + repo_dir.mkdir() + + # Initialize git repo + subprocess.run(["git", "init"], cwd=repo_dir, check=True, capture_output=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], + cwd=repo_dir, + check=True, + capture_output=True, + ) + subprocess.run( + ["git", "config", "user.name", "Test User"], + cwd=repo_dir, + check=True, + capture_output=True, + ) + + # Add origin remote + subprocess.run( + ["git", "remote", "add", "origin", str(origin_dir)], + cwd=repo_dir, + check=True, + capture_output=True, + ) + + # Create initial commit + test_file = repo_dir / "test.txt" + test_file.write_text("initial content") + subprocess.run(["git", "add", "."], cwd=repo_dir, check=True, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "Initial commit"], + cwd=repo_dir, + check=True, + capture_output=True, + ) + + # Push to origin so refs exist + subprocess.run( + ["git", "push", "-u", "origin", "HEAD:main"], + cwd=repo_dir, + check=True, + capture_output=True, + ) + + # Get the commit SHA + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + cwd=repo_dir, + check=True, + capture_output=True, + text=True, + ) + commit_sha = result.stdout.strip() + + yield repo_dir, commit_sha + + # Cleanup worktrees before removing directory + subprocess.run( + ["git", "worktree", "prune"], + cwd=repo_dir, + capture_output=True, + ) + + +def test_create_and_remove_worktree(temp_git_repo): + """Test basic worktree creation and removal.""" + repo_dir, commit_sha = temp_git_repo + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Create worktree + worktree_path = manager.create_worktree(commit_sha, 
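The fixture above builds a repo with a real origin remote so PRWorktreeManager can check out a pinned commit. Internally the manager presumably wraps something like git worktree add with a detached checkout; a rough sketch of that shape follows. The pr-<n>-... directory naming is inferred from the test assertions, not from the implementation.

import subprocess
import time
from pathlib import Path

def create_pr_worktree(repo_dir: Path, base_dir: str, commit_sha: str, pr_number: int) -> Path:
    """Create a detached worktree for a PR at a specific commit (illustrative only)."""
    worktree_path = repo_dir / base_dir / f"pr-{pr_number}-{commit_sha[:8]}-{int(time.time())}"
    worktree_path.parent.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        ["git", "worktree", "add", "--detach", str(worktree_path), commit_sha],
        cwd=repo_dir, check=True, capture_output=True,
    )
    return worktree_path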
pr_number=123) + + assert worktree_path.exists() + assert worktree_path.is_dir() + assert "pr-123" in worktree_path.name + + # Remove worktree + manager.remove_worktree(worktree_path) + + assert not worktree_path.exists() + + +def test_cleanup_orphaned_worktrees(temp_git_repo): + """Test cleanup of orphaned worktrees (not registered with git).""" + repo_dir, commit_sha = temp_git_repo + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Manually create an orphan directory (looks like worktree but not registered) + orphan_path = manager.worktree_base_dir / "pr-456-orphaned-12345" + orphan_path.mkdir(parents=True) + (orphan_path / "test.txt").write_text("orphan content") + + # Verify directory exists but is not in git worktree list + assert orphan_path.exists() + registered = manager.get_registered_worktrees() + assert orphan_path not in registered + + # Cleanup should remove orphaned directory + stats = manager.cleanup_worktrees() + + assert stats['orphaned'] >= 1 + assert not orphan_path.exists() + + +def test_cleanup_expired_worktrees(temp_git_repo): + """Test cleanup of worktrees older than max age.""" + repo_dir, commit_sha = temp_git_repo + + # Set a very short max age for testing + original_age = os.environ.get("PR_WORKTREE_MAX_AGE_DAYS") + os.environ["PR_WORKTREE_MAX_AGE_DAYS"] = "0" # 0 days = instant expiration + + try: + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Create a worktree + worktree_path = manager.create_worktree(commit_sha, pr_number=789) + assert worktree_path.exists() + + # Make it "old" by modifying mtime + old_time = time.time() - (2 * 86400) # 2 days ago + os.utime(worktree_path, (old_time, old_time)) + + # Cleanup should remove expired worktree + stats = manager.cleanup_worktrees() + + assert stats['expired'] >= 1 + assert not worktree_path.exists() + + finally: + # Restore original setting + if original_age is not None: + os.environ["PR_WORKTREE_MAX_AGE_DAYS"] = original_age + else: + os.environ.pop("PR_WORKTREE_MAX_AGE_DAYS", None) + + +def test_cleanup_excess_worktrees(temp_git_repo): + """Test cleanup when exceeding max worktree count.""" + repo_dir, commit_sha = temp_git_repo + + # Set a very low limit for testing + original_max = os.environ.get("MAX_PR_WORKTREES") + os.environ["MAX_PR_WORKTREES"] = "2" # Only keep 2 worktrees + + try: + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Create 4 worktrees (disable auto_cleanup so they all exist initially) + worktrees = [] + for i in range(4): + wt = manager.create_worktree(commit_sha, pr_number=1000 + i, auto_cleanup=False) + worktrees.append(wt) + # Add small delay to ensure different timestamps + time.sleep(0.1) + + # All should exist initially + for wt in worktrees: + assert wt.exists() + + # Cleanup should remove 2 oldest (excess over limit of 2) + stats = manager.cleanup_worktrees() + + assert stats['excess'] == 2 + + # Check that oldest worktrees were removed + existing = [wt for wt in worktrees if wt.exists()] + assert len(existing) == 2 + + finally: + # Restore original setting + if original_max is not None: + os.environ["MAX_PR_WORKTREES"] = original_max + else: + os.environ.pop("MAX_PR_WORKTREES", None) + + +def test_get_worktree_info(temp_git_repo): + """Test retrieving worktree information.""" + repo_dir, commit_sha = temp_git_repo + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Create multiple worktrees (disable auto_cleanup so they both exist) + wt1 = manager.create_worktree(commit_sha, pr_number=111, auto_cleanup=False) + 
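The expiry and excess tests drive cleanup entirely through environment variables. A sketch of the policy they imply: PR_WORKTREE_MAX_AGE_DAYS and MAX_PR_WORKTREES are read from the environment, and age is taken from directory mtime to match the os.utime manipulation in the test. The real manager also deregisters the git worktree; this only shows the selection logic.

import os
import shutil
import time
from pathlib import Path

def cleanup_policy(worktree_dirs: list[Path]) -> dict:
    """Remove expired worktrees, then trim the oldest ones over the count limit."""
    max_age_days = int(os.environ.get("PR_WORKTREE_MAX_AGE_DAYS", "7"))
    max_count = int(os.environ.get("MAX_PR_WORKTREES", "10"))
    stats = {"expired": 0, "excess": 0}

    now = time.time()
    survivors = []
    for path in sorted(worktree_dirs, key=lambda p: p.stat().st_mtime):  # oldest first
        age_days = (now - path.stat().st_mtime) / 86400
        if age_days > max_age_days:
            shutil.rmtree(path, ignore_errors=True)
            stats["expired"] += 1
        else:
            survivors.append(path)

    # Excess removal drops the oldest survivors until the count limit is met.
    while len(survivors) > max_count:
        shutil.rmtree(survivors.pop(0), ignore_errors=True)
        stats["excess"] += 1
    return stats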
time.sleep(0.1) + wt2 = manager.create_worktree(commit_sha, pr_number=222, auto_cleanup=False) + + # Get info + info_list = manager.get_worktree_info() + + assert len(info_list) >= 2 + + # Should be sorted by age (oldest first) + assert info_list[0].path == wt1 or info_list[1].path == wt1 + assert info_list[0].path == wt2 or info_list[1].path == wt2 + + # Check PR numbers were extracted + pr_numbers = {info.pr_number for info in info_list} + assert 111 in pr_numbers + assert 222 in pr_numbers + + # Cleanup + manager.cleanup_all_worktrees() + + +def test_cleanup_all_worktrees(temp_git_repo): + """Test removing all worktrees.""" + repo_dir, commit_sha = temp_git_repo + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Create several worktrees (disable auto_cleanup so they all exist) + for i in range(3): + manager.create_worktree(commit_sha, pr_number=500 + i, auto_cleanup=False) + + # Verify they exist + info = manager.get_worktree_info() + assert len(info) == 3 + + # Cleanup all + count = manager.cleanup_all_worktrees() + + assert count == 3 + + # Verify none remain + info = manager.get_worktree_info() + assert len(info) == 0 + + +def test_worktree_reuse_prevention(temp_git_repo): + """Test that worktrees are created fresh each time (no reuse).""" + repo_dir, commit_sha = temp_git_repo + manager = PRWorktreeManager(repo_dir, ".test-worktrees") + + # Create two worktrees for same PR (disable auto_cleanup so both exist) + wt1 = manager.create_worktree(commit_sha, pr_number=999, auto_cleanup=False) + wt2 = manager.create_worktree(commit_sha, pr_number=999, auto_cleanup=False) + + # Should be different paths (no reuse) + assert wt1 != wt2 + assert wt1.exists() + assert wt2.exists() + + # Cleanup + manager.cleanup_all_worktrees() diff --git a/tests/test_project_analyzer.py b/tests/test_project_analyzer.py index f69b14ef3b..df0dfaf1d5 100644 --- a/tests/test_project_analyzer.py +++ b/tests/test_project_analyzer.py @@ -13,22 +13,18 @@ """ import json -import pytest from pathlib import Path +import pytest from project_analyzer import ( + BASE_COMMANDS, + CustomScripts, ProjectAnalyzer, SecurityProfile, TechnologyStack, - CustomScripts, get_or_create_profile, is_command_allowed, needs_validation, - BASE_COMMANDS, - LANGUAGE_COMMANDS, - FRAMEWORK_COMMANDS, - DATABASE_COMMANDS, - INFRASTRUCTURE_COMMANDS, ) @@ -489,6 +485,7 @@ def test_force_reanalyze(self, python_project: Path): # Force re-analysis import time + time.sleep(0.1) # Ensure different timestamp profile2 = get_or_create_profile(python_project, force_reanalyze=True) @@ -582,11 +579,23 @@ def test_from_dict(self): "stack_commands": ["python"], "script_commands": [], "custom_commands": [], - "detected_stack": {"languages": ["python"], "package_managers": [], "frameworks": [], - "databases": [], "infrastructure": [], "cloud_providers": [], - "code_quality_tools": [], "version_managers": []}, - "custom_scripts": {"npm_scripts": [], "make_targets": [], "poetry_scripts": [], - "cargo_aliases": [], "shell_scripts": []}, + "detected_stack": { + "languages": ["python"], + "package_managers": [], + "frameworks": [], + "databases": [], + "infrastructure": [], + "cloud_providers": [], + "code_quality_tools": [], + "version_managers": [], + }, + "custom_scripts": { + "npm_scripts": [], + "make_targets": [], + "poetry_scripts": [], + "cargo_aliases": [], + "shell_scripts": [], + }, "project_dir": "/test", "created_at": "2024-01-01", "project_hash": "abc123", @@ -617,3 +626,175 @@ def test_save_and_load(self, temp_dir: Path): assert "ls" in 
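The force_reanalyze test adds a short sleep so the regenerated profile gets a distinguishable timestamp, which suggests get_or_create_profile caches a previously analyzed profile on disk and only recomputes on demand. A rough sketch of that cache-or-recompute flow; the cache file name and fields here are assumptions for illustration only.

import json
import time
from pathlib import Path

def get_or_create_profile_sketch(project_dir: Path, force_reanalyze: bool = False) -> dict:
    """Return a cached analysis profile unless a re-analysis is forced."""
    cache_file = project_dir / ".analysis_profile.json"  # hypothetical cache location
    if cache_file.exists() and not force_reanalyze:
        return json.loads(cache_file.read_text(encoding="utf-8"))

    profile = {
        "created_at": time.time(),  # differs between runs, which the test relies on
        "languages": [],            # filled by the real analyzer
    }
    cache_file.write_text(json.dumps(profile), encoding="utf-8")
    return profile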
loaded.base_commands assert "python" in loaded.stack_commands assert loaded.project_hash == "test123" + + +class TestDartFlutterDetection: + """Tests for Dart/Flutter language and framework detection.""" + + def test_detects_dart_language(self, temp_dir: Path): + """Detects Dart from pubspec.yaml.""" + pubspec = """name: my_app +version: 1.0.0 +environment: + sdk: ">=3.0.0 <4.0.0" +""" + (temp_dir / "pubspec.yaml").write_text(pubspec) + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_languages() + + assert "dart" in analyzer.profile.detected_stack.languages + + def test_detects_dart_from_files(self, temp_dir: Path): + """Detects Dart from .dart files.""" + (temp_dir / "lib").mkdir() + (temp_dir / "lib" / "main.dart").write_text("void main() {}") + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_languages() + + assert "dart" in analyzer.profile.detected_stack.languages + + def test_detects_flutter_framework(self, temp_dir: Path): + """Detects Flutter framework from pubspec.yaml.""" + pubspec = """name: my_flutter_app +version: 1.0.0 +environment: + sdk: ">=3.0.0 <4.0.0" + flutter: ">=3.0.0" + +dependencies: + flutter: + sdk: flutter +""" + (temp_dir / "pubspec.yaml").write_text(pubspec) + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_frameworks() + + assert "flutter" in analyzer.profile.detected_stack.frameworks + + def test_detects_pub_package_manager(self, temp_dir: Path): + """Detects pub package manager from pubspec.yaml.""" + pubspec = """name: my_app +version: 1.0.0 +""" + (temp_dir / "pubspec.yaml").write_text(pubspec) + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_package_managers() + + assert "pub" in analyzer.profile.detected_stack.package_managers + + def test_detects_pub_from_lock_file(self, temp_dir: Path): + """Detects pub package manager from pubspec.lock.""" + (temp_dir / "pubspec.lock").write_text("packages:\n") + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_package_managers() + + assert "pub" in analyzer.profile.detected_stack.package_managers + + +class TestMelosMonorepoDetection: + """Tests for Melos monorepo tool detection.""" + + def test_detects_melos_from_config(self, temp_dir: Path): + """Detects Melos from melos.yaml.""" + melos_config = """name: my_workspace +packages: + - packages/* +""" + (temp_dir / "melos.yaml").write_text(melos_config) + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_package_managers() + + assert "melos" in analyzer.profile.detected_stack.package_managers + + def test_melos_commands_allowed(self, temp_dir: Path): + """Melos commands are allowed when detected.""" + melos_config = """name: my_workspace +packages: + - packages/* +""" + (temp_dir / "melos.yaml").write_text(melos_config) + + profile = get_or_create_profile(temp_dir, force_reanalyze=True) + + assert "melos" in profile.stack_commands + + +class TestFvmVersionManagerDetection: + """Tests for Flutter Version Manager (FVM) detection.""" + + def test_detects_fvm_from_directory(self, temp_dir: Path): + """Detects FVM from .fvm directory.""" + (temp_dir / ".fvm").mkdir() + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_version_managers() + + assert "fvm" in analyzer.profile.detected_stack.version_managers + + def test_detects_fvm_from_config(self, temp_dir: Path): + """Detects FVM from fvm_config.json.""" + fvm_config = '{"flutterSdkVersion": "3.19.0"}' + (temp_dir / "fvm_config.json").write_text(fvm_config) + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_version_managers() + + assert "fvm" in 
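The new detection tests check for Dart, Flutter, pub, and Melos purely from marker files and pubspec contents. A condensed sketch of checks consistent with those assertions; the real ProjectAnalyzer methods may structure this very differently.

from pathlib import Path

def detect_dart_stack(project_dir: Path) -> dict[str, set[str]]:
    """Infer Dart/Flutter tooling from marker files, mirroring what the tests assert."""
    stack = {"languages": set(), "frameworks": set(), "package_managers": set()}
    pubspec = project_dir / "pubspec.yaml"

    if pubspec.exists() or any(project_dir.rglob("*.dart")):
        stack["languages"].add("dart")
    if pubspec.exists():
        stack["package_managers"].add("pub")
        if "flutter:" in pubspec.read_text(encoding="utf-8"):
            stack["frameworks"].add("flutter")
    if (project_dir / "pubspec.lock").exists():
        stack["package_managers"].add("pub")
    if (project_dir / "melos.yaml").exists():
        stack["package_managers"].add("melos")
    return stack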
analyzer.profile.detected_stack.version_managers + + def test_detects_fvm_from_fvmrc(self, temp_dir: Path): + """Detects FVM from .fvmrc file.""" + (temp_dir / ".fvmrc").write_text('{"flutter": "3.19.0"}') + + analyzer = ProjectAnalyzer(temp_dir) + analyzer._detect_version_managers() + + assert "fvm" in analyzer.profile.detected_stack.version_managers + + def test_fvm_commands_allowed(self, temp_dir: Path): + """FVM commands are allowed when detected.""" + (temp_dir / ".fvm").mkdir() + + profile = get_or_create_profile(temp_dir, force_reanalyze=True) + + assert "fvm" in profile.stack_commands + + +class TestDartFlutterCommandsAllowed: + """Tests that Dart/Flutter commands are properly allowed.""" + + def test_dart_commands_allowed_for_dart_project(self, temp_dir: Path): + """Dart commands are allowed when Dart is detected.""" + pubspec = """name: my_app +version: 1.0.0 +""" + (temp_dir / "pubspec.yaml").write_text(pubspec) + + profile = get_or_create_profile(temp_dir, force_reanalyze=True) + + # Core Dart commands + assert "dart" in profile.stack_commands + assert "pub" in profile.stack_commands + # Flutter should be available for Dart projects + assert "flutter" in profile.stack_commands + + def test_flutter_commands_allowed_for_flutter_project(self, temp_dir: Path): + """Flutter commands are allowed when Flutter is detected.""" + pubspec = """name: my_flutter_app +version: 1.0.0 +dependencies: + flutter: + sdk: flutter +""" + (temp_dir / "pubspec.yaml").write_text(pubspec) + + profile = get_or_create_profile(temp_dir, force_reanalyze=True) + + assert "flutter" in profile.stack_commands + assert "dart" in profile.stack_commands + assert "pub" in profile.stack_commands diff --git a/tests/test_security.py b/tests/test_security.py index 6f0a281cb1..bc6c9d76c9 100644 --- a/tests/test_security.py +++ b/tests/test_security.py @@ -21,6 +21,7 @@ validate_chmod_command, validate_rm_command, validate_git_commit, + validate_git_config, validate_dropdb_command, validate_dropuser_command, validate_psql_command, @@ -93,9 +94,24 @@ def test_empty_string(self): assert commands == [] def test_malformed_command(self): - """Returns empty list for malformed command (fail-safe).""" + """Uses fallback parser for malformed commands (Windows path support). + + The fallback parser extracts command names even from commands with + unclosed quotes, which is common when Windows paths are used. 
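The reworked extract_commands tests expect a lenient fallback: when shlex cannot tokenise the input (unclosed quotes, which Windows backslash paths make common), the leading command name should still come back rather than an empty list. One way such a fallback might look; this is an illustration, not the project's actual parser, which also normalises full Windows paths like C:\Python312\python.exe down to the bare command name.

import shlex

def extract_command_names(command: str) -> list[str]:
    """Best-effort extraction of the leading command name; falls back to naive splitting."""
    try:
        tokens = shlex.split(command)
    except ValueError:
        # shlex chokes on unclosed quotes; fall back to whitespace splitting
        # instead of failing closed and returning [].
        tokens = command.split()
    return [tokens[0]] if tokens else []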
+ """ commands = extract_commands("echo 'unclosed quote") - assert commands == [] + assert commands == ["echo"] + + def test_windows_path_command(self): + """Handles Windows paths with backslashes.""" + commands = extract_commands(r'C:\Python312\python.exe -c "print(1)"') + assert "python" in commands + + def test_incomplete_windows_path_command(self): + """Handles incomplete commands with Windows paths (common AI generation issue).""" + cmd = r'python3 -c "import json; json.load(open(\'D:\path\file.json' + commands = extract_commands(cmd) + assert commands == ["python3"] class TestSplitCommandSegments: @@ -393,6 +409,151 @@ def test_non_commit_commands_pass(self): assert allowed is True +class TestGitConfigValidator: + """Tests for git config validation (blocking identity changes).""" + + def test_blocks_user_name(self): + """Blocks git config user.name.""" + allowed, reason = validate_git_config("git config user.name 'Test User'") + assert allowed is False + assert "BLOCKED" in reason + assert "identity" in reason.lower() + + def test_blocks_user_email(self): + """Blocks git config user.email.""" + allowed, reason = validate_git_config("git config user.email 'test@example.com'") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_author_name(self): + """Blocks git config author.name.""" + allowed, reason = validate_git_config("git config author.name 'Fake Author'") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_committer_email(self): + """Blocks git config committer.email.""" + allowed, reason = validate_git_config("git config committer.email 'fake@test.com'") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_with_global_flag(self): + """Blocks identity config even with --global flag.""" + allowed, reason = validate_git_config("git config --global user.name 'Test User'") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_with_local_flag(self): + """Blocks identity config even with --local flag.""" + allowed, reason = validate_git_config("git config --local user.email 'test@example.com'") + assert allowed is False + assert "BLOCKED" in reason + + def test_allows_non_identity_config(self): + """Allows setting non-identity config options.""" + allowed, reason = validate_git_config("git config core.autocrlf true") + assert allowed is True + + allowed, reason = validate_git_config("git config diff.algorithm patience") + assert allowed is True + + allowed, reason = validate_git_config("git config pull.rebase true") + assert allowed is True + + def test_allows_config_list(self): + """Allows git config --list and similar read operations.""" + allowed, reason = validate_git_config("git config --list") + assert allowed is True + + allowed, reason = validate_git_config("git config --get user.name") + assert allowed is True + + def test_allows_non_config_commands(self): + """Non-config git commands pass through.""" + allowed, reason = validate_git_config("git status") + assert allowed is True + + allowed, reason = validate_git_config("git commit -m 'test'") + assert allowed is True + + def test_case_insensitive_blocking(self): + """Blocks identity keys regardless of case.""" + allowed, reason = validate_git_config("git config USER.NAME 'Test'") + assert allowed is False + + allowed, reason = validate_git_config("git config User.Email 'test@test.com'") + assert allowed is False + + def test_handles_malformed_command(self): + """Handles malformed commands gracefully.""" + # Unbalanced quotes - should 
fail closed + allowed, reason = validate_git_config("git config user.name 'Test User") + assert allowed is False + assert "parse" in reason.lower() + + +class TestGitIdentityProtection: + """Tests for git identity protection (blocking -c flag bypass).""" + + def test_blocks_inline_user_name(self): + """Blocks git -c user.name=... on any command.""" + allowed, reason = validate_git_commit("git -c user.name=Evil commit -m 'test'") + assert allowed is False + assert "BLOCKED" in reason + assert "identity" in reason.lower() + + def test_blocks_inline_user_email(self): + """Blocks git -c user.email=... on any command.""" + allowed, reason = validate_git_commit("git -c user.email=fake@test.com commit -m 'test'") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_inline_author_name(self): + """Blocks git -c author.name=... on any command.""" + allowed, reason = validate_git_commit("git -c author.name=FakeAuthor push") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_inline_committer_email(self): + """Blocks git -c committer.email=... on any command.""" + allowed, reason = validate_git_commit("git -c committer.email=fake@test.com log") + assert allowed is False + assert "BLOCKED" in reason + + def test_blocks_nospace_format(self): + """Blocks -ckey=value format (no space after -c).""" + allowed, reason = validate_git_commit("git -cuser.name=Evil commit -m 'test'") + assert allowed is False + assert "BLOCKED" in reason + + def test_allows_non_identity_config(self): + """Allows -c with non-blocked config keys.""" + allowed, reason = validate_git_commit("git -c core.autocrlf=true commit -m 'test'") + assert allowed is True + + allowed, reason = validate_git_commit("git -c diff.algorithm=patience diff") + assert allowed is True + + def test_allows_normal_git_commands(self): + """Normal git commands without -c identity flags pass.""" + allowed, reason = validate_git_commit("git status") + assert allowed is True + + allowed, reason = validate_git_commit("git log --oneline") + assert allowed is True + + allowed, reason = validate_git_commit("git branch -a") + assert allowed is True + + def test_case_insensitive_blocking(self): + """Blocks identity keys regardless of case.""" + allowed, reason = validate_git_commit("git -c USER.NAME=Evil commit -m 'test'") + assert allowed is False + + allowed, reason = validate_git_commit("git -c User.Email=fake@test.com push") + assert allowed is False + + # ============================================================================= # DATABASE VALIDATOR TESTS # ============================================================================= diff --git a/tests/test_spec_pipeline.py b/tests/test_spec_pipeline.py index a606bb3166..e063201fea 100644 --- a/tests/test_spec_pipeline.py +++ b/tests/test_spec_pipeline.py @@ -184,7 +184,7 @@ def test_init_with_spec_dir(self, temp_dir: Path): assert orchestrator.spec_dir == custom_spec_dir def test_init_default_model(self, temp_dir: Path): - """Uses default model.""" + """Uses default model (shorthand).""" with patch('spec.pipeline.init_auto_claude_dir') as mock_init: mock_init.return_value = (temp_dir / ".auto-claude", False) specs_dir = temp_dir / ".auto-claude" / "specs" @@ -192,7 +192,8 @@ def test_init_default_model(self, temp_dir: Path): orchestrator = SpecOrchestrator(project_dir=temp_dir) - assert orchestrator.model == "claude-sonnet-4-5-20250929" + # Default is now "sonnet" shorthand (resolved via API Profile if configured) + assert orchestrator.model == "sonnet" def 
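TestGitIdentityProtection covers the bypass route: git -c user.name=... (with or without a space after -c) on any subcommand. A regex-based sketch consistent with the cases asserted in these tests; the real validator may combine this with full command parsing.

import re

_IDENTITY_OVERRIDE = re.compile(
    r"-c\s*(user|author|committer)\.(name|email)\s*=",
    re.IGNORECASE,
)

def blocks_inline_identity(command: str) -> tuple[bool, str]:
    """Reject git commands that smuggle identity changes in via `-c key=value`."""
    if command.split()[:1] == ["git"] and _IDENTITY_OVERRIDE.search(command):
        return False, "BLOCKED: overriding git identity with -c is not allowed"
    return True, ""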
test_init_custom_model(self, temp_dir: Path): """Uses custom model.""" diff --git a/tests/test_structured_outputs.py b/tests/test_structured_outputs.py index dc5f34a595..1c0f537534 100644 --- a/tests/test_structured_outputs.py +++ b/tests/test_structured_outputs.py @@ -223,7 +223,7 @@ class TestOrchestratorFinding: """Tests for OrchestratorFinding model.""" def test_valid_finding(self): - """Test valid orchestrator finding.""" + """Test valid orchestrator finding with evidence field.""" data = { "file": "src/api.py", "line": 25, @@ -232,40 +232,24 @@ def test_valid_finding(self): "category": "quality", "severity": "medium", "suggestion": "Add error handling with proper logging", - "confidence": 90, + "evidence": "def handle_request(req):\n result = db.query(req.id) # no try-catch", } result = OrchestratorFinding.model_validate(data) assert result.file == "src/api.py" - assert result.confidence == 0.9 # 90 normalized to 0.9 + assert result.evidence is not None + assert "no try-catch" in result.evidence - def test_confidence_bounds(self): - """Test confidence bounds (accepts 0-100 or 0.0-1.0, normalized to 0.0-1.0).""" - # Valid min + def test_evidence_optional(self): + """Test that evidence field is optional.""" data = { "file": "test.py", "title": "Test", - "description": "Test", + "description": "Test finding", "category": "quality", "severity": "low", - "confidence": 0, } result = OrchestratorFinding.model_validate(data) - assert result.confidence == 0 # 0 stays as 0 - - # Valid max (100% normalized to 1.0) - data["confidence"] = 100 - result = OrchestratorFinding.model_validate(data) - assert result.confidence == 1.0 # 100 normalized to 1.0 - - # Invalid: over 100 (would normalize to >1.0) - data["confidence"] = 101 - with pytest.raises(ValidationError): - OrchestratorFinding.model_validate(data) - - # Invalid: negative - data["confidence"] = -1 - with pytest.raises(ValidationError): - OrchestratorFinding.model_validate(data) + assert result.evidence is None class TestOrchestratorReviewResponse: @@ -284,7 +268,7 @@ def test_valid_response(self): "description": "API key exposed in source", "category": "security", "severity": "critical", - "confidence": 95, + "evidence": "API_KEY = 'sk-prod-12345abcdef'", } ], "summary": "Found 1 critical security issue", @@ -394,8 +378,8 @@ def test_security_category_default(self): class TestDeepAnalysisFinding: """Tests for DeepAnalysisFinding model.""" - def test_confidence_float(self): - """Test confidence is a float between 0 and 1.""" + def test_evidence_field(self): + """Test evidence field for proof of issue.""" data = { "id": "deep-1", "severity": "medium", @@ -404,10 +388,10 @@ def test_confidence_float(self): "file": "worker.py", "line": 100, "category": "logic", - "confidence": 0.75, + "evidence": "shared_state += 1 # no lock protection", } result = DeepAnalysisFinding.model_validate(data) - assert result.confidence == 0.75 + assert result.evidence == "shared_state += 1 # no lock protection" def test_verification_note(self): """Test verification note field.""" diff --git a/tests/test_workspace.py b/tests/test_workspace.py index 874334f96a..ead0f62834 100644 --- a/tests/test_workspace.py +++ b/tests/test_workspace.py @@ -161,14 +161,14 @@ def test_setup_isolated_mode(self, temp_git_repo: Path): assert working_dir.name == TEST_SPEC_NAME def test_setup_isolated_creates_worktrees_dir(self, temp_git_repo: Path): - """Isolated mode creates .worktrees directory.""" + """Isolated mode creates worktrees directory.""" setup_workspace( temp_git_repo, 
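The structured-outputs changes replace the bounded confidence number with an optional evidence string holding the offending code. A minimal pydantic sketch of a finding with that shape; the field set is trimmed to what the tests touch, and the real OrchestratorFinding has more fields and validators.

from typing import Optional
from pydantic import BaseModel

class FindingSketch(BaseModel):
    """Trimmed-down illustration of an evidence-carrying finding."""
    file: str
    line: Optional[int] = None
    title: str
    description: str
    category: str
    severity: str
    suggestion: Optional[str] = None
    evidence: Optional[str] = None  # verbatim snippet proving the issue; optional

finding = FindingSketch(
    file="src/api.py",
    title="Missing error handling",
    description="DB call has no try/except",
    category="quality",
    severity="medium",
    evidence="result = db.query(req.id)  # no try-catch",
)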
"test-spec", WorkspaceMode.ISOLATED, ) - assert (temp_git_repo / ".worktrees").exists() + assert (temp_git_repo / ".auto-claude" / "worktrees" / "tasks").exists() class TestWorkspaceUtilities: @@ -185,7 +185,8 @@ def test_per_spec_worktree_naming(self, temp_git_repo: Path): # Worktree should be named after the spec assert working_dir.name == spec_name - assert working_dir.parent.name == ".worktrees" + # New path: .auto-claude/worktrees/tasks/{spec_name} + assert working_dir.parent.name == "tasks" class TestWorkspaceIntegration: @@ -236,12 +237,16 @@ def test_isolated_merge(self, temp_git_repo: Path): WorkspaceMode.ISOLATED, ) - # Make changes and commit + # Make changes and commit using git directly (working_dir / "feature.py").write_text("# New feature\n") - manager.commit_in_staging("Add feature") + subprocess.run(["git", "add", "."], cwd=working_dir, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "Add feature"], + cwd=working_dir, capture_output=True + ) - # Merge back - result = manager.merge_staging(delete_after=False) + # Merge back using merge_worktree + result = manager.merge_worktree("test-spec", delete_after=False) assert result is True @@ -264,12 +269,16 @@ def test_cleanup_after_merge(self, temp_git_repo: Path): WorkspaceMode.ISOLATED, ) - # Commit changes + # Commit changes using git directly (working_dir / "test.py").write_text("test") - manager.commit_in_staging("Test") + subprocess.run(["git", "add", "."], cwd=working_dir, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "Test"], + cwd=working_dir, capture_output=True + ) # Merge with cleanup - manager.merge_staging(delete_after=True) + manager.merge_worktree("test-spec", delete_after=True) # Workspace should be removed assert not working_dir.exists() @@ -282,12 +291,16 @@ def test_workspace_preserved_after_merge_no_delete(self, temp_git_repo: Path): WorkspaceMode.ISOLATED, ) - # Commit changes + # Commit changes using git directly (working_dir / "test.py").write_text("test") - manager.commit_in_staging("Test") + subprocess.run(["git", "add", "."], cwd=working_dir, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "Test"], + cwd=working_dir, capture_output=True + ) # Merge without cleanup - manager.merge_staging(delete_after=False) + manager.merge_worktree("test-spec", delete_after=False) # Workspace should still exist assert working_dir.exists() @@ -371,12 +384,13 @@ def test_different_specs_get_different_worktrees(self, temp_git_repo: Path): assert working_dir1 != working_dir2 def test_worktree_path_in_worktrees_dir(self, temp_git_repo: Path): - """Worktree is created in .worktrees directory.""" + """Worktree is created in worktrees directory.""" working_dir, _, _ = setup_workspace( temp_git_repo, "test-spec", WorkspaceMode.ISOLATED, ) - assert ".worktrees" in str(working_dir) - assert working_dir.parent.name == ".worktrees" + # New path: .auto-claude/worktrees/tasks/{spec_name} + assert "worktrees" in str(working_dir) + assert working_dir.parent.name == "tasks" diff --git a/tests/test_worktree.py b/tests/test_worktree.py index 726dc77ca7..d98c82a22c 100644 --- a/tests/test_worktree.py +++ b/tests/test_worktree.py @@ -9,14 +9,17 @@ - Branch operations - Merge operations - Change tracking +- Worktree cleanup and age detection """ import subprocess +import time +from datetime import datetime, timedelta from pathlib import Path import pytest -from worktree import WorktreeManager, WorktreeInfo, WorktreeError, STAGING_WORKTREE_NAME +from worktree import WorktreeManager class 
TestWorktreeManagerInitialization: @@ -27,7 +30,7 @@ def test_init_with_valid_git_repo(self, temp_git_repo: Path): manager = WorktreeManager(temp_git_repo) assert manager.project_dir == temp_git_repo - assert manager.worktrees_dir == temp_git_repo / ".worktrees" + assert manager.worktrees_dir == temp_git_repo / ".auto-claude" / "worktrees" / "tasks" assert manager.base_branch is not None def test_init_prefers_main_over_current_branch(self, temp_git_repo: Path): @@ -63,7 +66,7 @@ def test_init_with_explicit_base_branch(self, temp_git_repo: Path): assert manager.base_branch == "main" def test_setup_creates_worktrees_directory(self, temp_git_repo: Path): - """Setup creates the .worktrees directory.""" + """Setup creates the worktrees directory.""" manager = WorktreeManager(temp_git_repo) manager.setup() @@ -112,75 +115,6 @@ def test_get_or_create_replaces_existing_worktree(self, temp_git_repo: Path): assert (info2.path / "test-file.txt").exists() -class TestStagingWorktree: - """Tests for staging worktree operations (backward compatibility).""" - - def test_get_or_create_staging_creates_new(self, temp_git_repo: Path): - """Creates staging worktree if it doesn't exist.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - - info = manager.get_or_create_staging("test-spec") - - assert info.path.exists() - # Staging is now per-spec, worktree is named after spec - assert info.path.name == "test-spec" - assert "auto-claude/test-spec" in info.branch - - def test_get_or_create_staging_returns_existing(self, temp_git_repo: Path): - """Returns existing staging worktree without recreating.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - - info1 = manager.get_or_create_staging("test-spec") - # Add a file - (info1.path / "marker.txt").write_text("marker") - - info2 = manager.get_or_create_staging("test-spec") - - # Should be the same worktree (marker file exists) - assert (info2.path / "marker.txt").exists() - - def test_staging_exists_false_when_none(self, temp_git_repo: Path): - """staging_exists returns False when no staging worktree.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - - assert manager.staging_exists() is False - - def test_staging_exists_true_when_created(self, temp_git_repo: Path): - """staging_exists returns True after creating staging.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - manager.get_or_create_staging("test-spec") - - assert manager.staging_exists() is True - - def test_get_staging_path(self, temp_git_repo: Path): - """get_staging_path returns correct path.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - manager.get_or_create_staging("test-spec") - - path = manager.get_staging_path() - - assert path is not None - # Staging is now per-spec, path is named after spec - assert path.name == "test-spec" - - def test_get_staging_info(self, temp_git_repo: Path): - """get_staging_info returns WorktreeInfo.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - manager.get_or_create_staging("test-spec") - - info = manager.get_staging_info() - - assert info is not None - assert isinstance(info, WorktreeInfo) - assert info.branch is not None - - class TestWorktreeRemoval: """Tests for removing worktrees.""" @@ -194,17 +128,6 @@ def test_remove_worktree(self, temp_git_repo: Path): assert not info.path.exists() - def test_remove_staging(self, temp_git_repo: Path): - """Can remove staging worktree.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - info = 
manager.get_or_create_staging("test-spec") - - manager.remove_staging() - - assert not info.path.exists() - assert manager.staging_exists() is False - def test_remove_with_delete_branch(self, temp_git_repo: Path): """Removing worktree can also delete the branch.""" manager = WorktreeManager(temp_git_repo) @@ -225,56 +148,6 @@ def test_remove_with_delete_branch(self, temp_git_repo: Path): class TestWorktreeCommitAndMerge: """Tests for commit and merge operations.""" - def test_commit_in_staging(self, temp_git_repo: Path): - """Can commit changes in staging worktree.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - info = manager.get_or_create_staging("test-spec") - - # Make changes in staging - (info.path / "new-file.txt").write_text("new content") - - result = manager.commit_in_staging("Test commit") - - assert result is True - - # Verify commit was made - log_result = subprocess.run( - ["git", "log", "--oneline", "-1"], - cwd=info.path, capture_output=True, text=True - ) - assert "Test commit" in log_result.stdout - - def test_commit_in_staging_nothing_to_commit(self, temp_git_repo: Path): - """commit_in_staging succeeds when nothing to commit.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - manager.get_or_create_staging("test-spec") - - # No changes made - result = manager.commit_in_staging("Empty commit") - - assert result is True # Should succeed (nothing to commit is OK) - - def test_merge_staging_sync(self, temp_git_repo: Path): - """Can merge staging worktree to main branch.""" - manager = WorktreeManager(temp_git_repo) - manager.setup() - info = manager.get_or_create_staging("test-spec") - - # Make changes in staging - (info.path / "feature.txt").write_text("feature content") - manager.commit_in_staging("Add feature") - - # Merge back - result = manager.merge_staging(delete_after=False) - - assert result is True - - # Verify file is in main branch - subprocess.run(["git", "checkout", manager.base_branch], cwd=temp_git_repo, capture_output=True) - assert (temp_git_repo / "feature.txt").exists() - def test_merge_worktree(self, temp_git_repo: Path): """Can merge a worktree back to main.""" manager = WorktreeManager(temp_git_repo) @@ -323,12 +196,16 @@ def test_get_change_summary(self, temp_git_repo: Path): """get_change_summary returns correct counts.""" manager = WorktreeManager(temp_git_repo) manager.setup() - info = manager.get_or_create_staging("test-spec") + info = manager.create_worktree("test-spec") # Make various changes (info.path / "new-file.txt").write_text("new") (info.path / "README.md").write_text("modified") - manager.commit_in_staging("Changes") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "Changes"], + cwd=info.path, capture_output=True + ) summary = manager.get_change_summary("test-spec") @@ -339,11 +216,15 @@ def test_get_changed_files(self, temp_git_repo: Path): """get_changed_files returns list of changed files.""" manager = WorktreeManager(temp_git_repo) manager.setup() - info = manager.get_or_create_staging("test-spec") + info = manager.create_worktree("test-spec") # Make changes (info.path / "added.txt").write_text("new file") - manager.commit_in_staging("Add file") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "Add file"], + cwd=info.path, capture_output=True + ) files = manager.get_changed_files("test-spec") @@ -393,7 +274,7 @@ def test_cleanup_all(self, temp_git_repo: Path): 
manager.setup() manager.create_worktree("spec-1") manager.create_worktree("spec-2") - manager.get_or_create_staging("test-spec") + manager.create_worktree("spec-3") manager.cleanup_all() @@ -418,7 +299,7 @@ def test_get_test_commands_python(self, temp_git_repo: Path): """get_test_commands detects Python project commands.""" manager = WorktreeManager(temp_git_repo) manager.setup() - info = manager.get_or_create_staging("test-spec") + info = manager.create_worktree("test-spec") # Create requirements.txt (info.path / "requirements.txt").write_text("flask\n") @@ -431,11 +312,170 @@ def test_get_test_commands_node(self, temp_git_repo: Path): """get_test_commands detects Node.js project commands.""" manager = WorktreeManager(temp_git_repo) manager.setup() - info = manager.get_or_create_staging("test-spec") + info = manager.create_worktree("test-spec-node") # Create package.json (info.path / "package.json").write_text('{"name": "test"}') - commands = manager.get_test_commands("test-spec") + commands = manager.get_test_commands("test-spec-node") assert any("npm" in cmd for cmd in commands) + + +class TestWorktreeCleanup: + """Tests for worktree cleanup and age detection functionality.""" + + def test_get_worktree_stats_includes_age(self, temp_git_repo: Path): + """Worktree stats include last commit date and age in days.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + info = manager.create_worktree("test-spec") + + # Make a commit in the worktree + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], cwd=info.path, capture_output=True + ) + + # Get stats + stats = manager._get_worktree_stats("test-spec") + + assert stats["last_commit_date"] is not None + assert isinstance(stats["last_commit_date"], datetime) + assert stats["days_since_last_commit"] is not None + assert stats["days_since_last_commit"] == 0 # Just committed + + def test_get_old_worktrees(self, temp_git_repo: Path): + """get_old_worktrees identifies worktrees based on age threshold.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + + # Create a worktree with a commit + info = manager.create_worktree("test-spec") + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], cwd=info.path, capture_output=True + ) + + # Should not be considered old with default threshold (30 days) + old_worktrees = manager.get_old_worktrees(days_threshold=30) + assert len(old_worktrees) == 0 + + # Should be considered old with 0 day threshold + old_worktrees = manager.get_old_worktrees(days_threshold=0) + assert len(old_worktrees) == 1 + assert "test-spec" in old_worktrees + + def test_get_old_worktrees_with_stats(self, temp_git_repo: Path): + """get_old_worktrees returns full WorktreeInfo when include_stats=True.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + + # Create a worktree with a commit + info = manager.create_worktree("test-spec") + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], cwd=info.path, capture_output=True + ) + + # Get old worktrees with stats + old_worktrees = manager.get_old_worktrees(days_threshold=0, include_stats=True) + + assert len(old_worktrees) 
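The cleanup tests above derive a worktree's age from its last commit rather than from filesystem timestamps. A sketch of how days_since_last_commit might be computed with plain git; the real _get_worktree_stats may use different plumbing.

import subprocess
from datetime import datetime, timezone
from pathlib import Path

def days_since_last_commit(worktree_path: Path) -> int | None:
    """Age of the most recent commit in a worktree, in whole days (None if unavailable)."""
    result = subprocess.run(
        ["git", "log", "-1", "--format=%ct"],
        cwd=worktree_path, capture_output=True, text=True,
    )
    stamp = result.stdout.strip()
    if result.returncode != 0 or not stamp:
        return None
    last_commit = datetime.fromtimestamp(int(stamp), tz=timezone.utc)
    return (datetime.now(tz=timezone.utc) - last_commit).days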
== 1 + assert old_worktrees[0].spec_name == "test-spec" + assert old_worktrees[0].days_since_last_commit is not None + + def test_cleanup_old_worktrees_dry_run(self, temp_git_repo: Path): + """cleanup_old_worktrees dry run does not remove worktrees.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + + # Create a worktree with a commit + info = manager.create_worktree("test-spec") + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], cwd=info.path, capture_output=True + ) + + # Dry run should not remove anything + removed, failed = manager.cleanup_old_worktrees(days_threshold=0, dry_run=True) + + assert len(removed) == 0 + assert len(failed) == 0 + assert info.path.exists() # Worktree still exists + + def test_cleanup_old_worktrees_removes_old(self, temp_git_repo: Path): + """cleanup_old_worktrees removes worktrees older than threshold.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + + # Create a worktree with a commit + info = manager.create_worktree("test-spec") + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], cwd=info.path, capture_output=True + ) + + # Actually remove with 0 day threshold + removed, failed = manager.cleanup_old_worktrees(days_threshold=0, dry_run=False) + + assert len(removed) == 1 + assert "test-spec" in removed + assert len(failed) == 0 + assert not info.path.exists() # Worktree should be removed + + def test_get_worktree_count_warning(self, temp_git_repo: Path): + """get_worktree_count_warning returns appropriate warnings based on count.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + + # No warning with few worktrees + warning = manager.get_worktree_count_warning(warning_threshold=10) + assert warning is None + + # Create 11 worktrees to trigger warning + for i in range(11): + info = manager.create_worktree(f"test-spec-{i}") + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], + cwd=info.path, + capture_output=True, + ) + + warning = manager.get_worktree_count_warning(warning_threshold=10) + assert warning is not None + assert "WARNING" in warning + + def test_get_worktree_count_critical_warning(self, temp_git_repo: Path): + """get_worktree_count_warning returns critical warning for high counts.""" + manager = WorktreeManager(temp_git_repo) + manager.setup() + + # Create 21 worktrees to trigger critical warning + for i in range(21): + info = manager.create_worktree(f"test-spec-{i}") + test_file = info.path / "test.txt" + test_file.write_text("test") + subprocess.run(["git", "add", "."], cwd=info.path, capture_output=True) + subprocess.run( + ["git", "commit", "-m", "test commit"], + cwd=info.path, + capture_output=True, + ) + + warning = manager.get_worktree_count_warning(critical_threshold=20) + assert warning is not None + assert "CRITICAL" in warning
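The final two tests pin the warning behaviour to worktree counts at a warning and a critical threshold. A sketch of a helper matching those assertions; the default thresholds are guesses, since the tests always pass them explicitly.

def worktree_count_warning(count: int, warning_threshold: int = 10,
                           critical_threshold: int = 20) -> str | None:
    """Return a human-readable warning once worktree counts cross the thresholds."""
    if count > critical_threshold:
        return (f"CRITICAL: {count} worktrees exist (limit {critical_threshold}); "
                "clean up old worktrees to avoid disk and git-ref bloat")
    if count > warning_threshold:
        return f"WARNING: {count} worktrees exist; consider running cleanup"
    return None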