diff --git a/.github/workflows/beta-release.yml b/.github/workflows/beta-release.yml
index f19d3e607a..271ff679a9 100644
--- a/.github/workflows/beta-release.yml
+++ b/.github/workflows/beta-release.yml
@@ -97,16 +97,28 @@ jobs:
- name: Install Rust toolchain (for building native Python packages)
uses: dtolnay/rust-toolchain@stable
+ - name: Cache pip wheel cache (for compiled packages like real_ladybug)
+ uses: actions/cache@v4
+ with:
+ path: ~/Library/Caches/pip
+ key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-x64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-x64-3.12.8-rust
+ key: python-bundle-${{ runner.os }}-x64-3.12.8-rust-${{ hashFiles('apps/backend/requirements.txt') }}
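+ # The requirements hash in the key forces a Python runtime rebuild whenever backend dependencies change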
restore-keys: |
- python-bundle-${{ runner.os }}-x64-
+ python-bundle-${{ runner.os }}-x64-3.12.8-rust-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package macOS (Intel)
run: |
@@ -116,6 +128,9 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: ${{ secrets.MAC_CERTIFICATE }}
CSC_KEY_PASSWORD: ${{ secrets.MAC_CERTIFICATE_PASSWORD }}
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Notarize macOS Intel app
env:
@@ -181,16 +196,28 @@ jobs:
- name: Install dependencies
run: cd apps/frontend && npm ci
+ - name: Cache pip wheel cache
+ uses: actions/cache@v4
+ with:
+ path: ~/Library/Caches/pip
+ key: pip-wheel-${{ runner.os }}-arm64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-arm64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-arm64-3.12.8
+ key: python-bundle-${{ runner.os }}-arm64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-arm64-
+ python-bundle-${{ runner.os }}-arm64-3.12.8-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package macOS (Apple Silicon)
run: |
@@ -200,6 +227,9 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: ${{ secrets.MAC_CERTIFICATE }}
CSC_KEY_PASSWORD: ${{ secrets.MAC_CERTIFICATE_PASSWORD }}
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Notarize macOS ARM64 app
env:
@@ -235,6 +265,12 @@ jobs:
build-windows:
needs: create-tag
runs-on: windows-latest
+ permissions:
+ id-token: write # Required for OIDC authentication with Azure
+ contents: read
+ env:
+ # Job-level env so AZURE_CLIENT_ID is available for step-level if conditions
+ AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
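+ # When the secret is unset, the signing, verification, and checksum steps below are skipped with a warning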
steps:
- uses: actions/checkout@v4
with:
@@ -265,16 +301,28 @@ jobs:
- name: Install dependencies
run: cd apps/frontend && npm ci
+ - name: Cache pip wheel cache
+ uses: actions/cache@v4
+ with:
+ path: ~\AppData\Local\pip\Cache
+ key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-x64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-x64-3.12.8
+ key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-x64-
+ python-bundle-${{ runner.os }}-x64-3.12.8-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package Windows
shell: bash
@@ -283,8 +331,122 @@ jobs:
cd apps/frontend && npm run package:win -- --config.extraMetadata.version="$VERSION"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- CSC_LINK: ${{ secrets.WIN_CERTIFICATE }}
- CSC_KEY_PASSWORD: ${{ secrets.WIN_CERTIFICATE_PASSWORD }}
+ # Disable electron-builder's built-in signing (we use Azure Trusted Signing instead)
+ CSC_IDENTITY_AUTO_DISCOVERY: false
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
+
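+ # Signing happens after packaging: log in to Azure via OIDC, sign the installer in dist/ with
+ # Trusted Signing, verify the signature, then regenerate the latest.yml checksums that signing invalidates.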
+ - name: Azure Login (OIDC)
+ if: env.AZURE_CLIENT_ID != ''
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ - name: Sign Windows executable with Azure Trusted Signing
+ if: env.AZURE_CLIENT_ID != ''
+ uses: azure/trusted-signing-action@v0.5.11
+ with:
+ endpoint: https://neu.codesigning.azure.net/
+ trusted-signing-account-name: ${{ secrets.AZURE_SIGNING_ACCOUNT }}
+ certificate-profile-name: ${{ secrets.AZURE_CERTIFICATE_PROFILE }}
+ files-folder: apps/frontend/dist
+ files-folder-filter: exe
+ file-digest: SHA256
+ timestamp-rfc3161: http://timestamp.acs.microsoft.com
+ timestamp-digest: SHA256
+
+ - name: Verify Windows executable is signed
+ if: env.AZURE_CLIENT_ID != ''
+ shell: pwsh
+ run: |
+ cd apps/frontend/dist
+ $exeFile = Get-ChildItem -Filter "*.exe" | Select-Object -First 1
+ if ($exeFile) {
+ Write-Host "Verifying signature on $($exeFile.Name)..."
+ $sig = Get-AuthenticodeSignature -FilePath $exeFile.FullName
+ if ($sig.Status -ne 'Valid') {
+ Write-Host "::error::Signature verification failed: $($sig.Status)"
+ Write-Host "::error::Status Message: $($sig.StatusMessage)"
+ exit 1
+ }
+ Write-Host "✅ Signature verified successfully"
+ Write-Host " Subject: $($sig.SignerCertificate.Subject)"
+ Write-Host " Issuer: $($sig.SignerCertificate.Issuer)"
+ Write-Host " Thumbprint: $($sig.SignerCertificate.Thumbprint)"
+ } else {
+ Write-Host "::error::No .exe file found to verify"
+ exit 1
+ }
+
+ - name: Regenerate checksums after signing
+ if: env.AZURE_CLIENT_ID != ''
+ shell: pwsh
+ run: |
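+ # Signing rewrote the installer bytes, so the sha512/size electron-builder recorded in latest.yml
+ # no longer match; recompute them so auto-update checksum verification still passes.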
+ $ErrorActionPreference = "Stop"
+ cd apps/frontend/dist
+
+ # Find the installer exe (electron-builder names it with "Setup" or just the app name)
+ # electron-builder produces one installer exe per build
+ $exeFiles = Get-ChildItem -Filter "*.exe"
+ if ($exeFiles.Count -eq 0) {
+ Write-Host "::error::No .exe files found in dist folder"
+ exit 1
+ }
+
+ Write-Host "Found $($exeFiles.Count) exe file(s): $($exeFiles.Name -join ', ')"
+
+ $ymlFile = "latest.yml"
+ if (-not (Test-Path $ymlFile)) {
+ Write-Host "::error::$ymlFile not found - cannot update checksums"
+ exit 1
+ }
+
+ $content = Get-Content $ymlFile -Raw
+ $originalContent = $content
+
+ # Log hash and size for each exe (informational only; latest.yml is updated below using the primary installer)
+ foreach ($exeFile in $exeFiles) {
+ Write-Host "Processing $($exeFile.Name)..."
+
+ # Compute SHA512 hash and convert to base64 (electron-builder format)
+ $bytes = [System.IO.File]::ReadAllBytes($exeFile.FullName)
+ $sha512 = [System.Security.Cryptography.SHA512]::Create()
+ $hashBytes = $sha512.ComputeHash($bytes)
+ $hash = [System.Convert]::ToBase64String($hashBytes)
+ $size = $exeFile.Length
+
+ Write-Host " Hash: $hash"
+ Write-Host " Size: $size"
+ }
+
+ # For electron-builder, latest.yml has a single file entry for the installer
+ # Update the sha512 and size for the primary exe (first one, typically the installer)
+ $primaryExe = $exeFiles | Select-Object -First 1
+ $bytes = [System.IO.File]::ReadAllBytes($primaryExe.FullName)
+ $sha512 = [System.Security.Cryptography.SHA512]::Create()
+ $hashBytes = $sha512.ComputeHash($bytes)
+ $hash = [System.Convert]::ToBase64String($hashBytes)
+ $size = $primaryExe.Length
+
+ # Update sha512 hash (base64 pattern: alphanumeric, +, /, =)
+ $content = $content -replace 'sha512: [A-Za-z0-9+/=]+', "sha512: $hash"
+ # Update size
+ $content = $content -replace 'size: \d+', "size: $size"
+
+ if ($content -eq $originalContent) {
+ Write-Host "::error::Checksum replacement failed - content unchanged. Check if latest.yml format has changed."
+ exit 1
+ }
+
+ Set-Content -Path $ymlFile -Value $content -NoNewline
+ Write-Host "✅ Updated $ymlFile with new base64 hash and size for $($primaryExe.Name)"
+
+ - name: Skip signing notice
+ if: env.AZURE_CLIENT_ID == ''
+ run: echo "::warning::Windows signing skipped - AZURE_CLIENT_ID not configured. The .exe will be unsigned."
- name: Upload artifacts
uses: actions/upload-artifact@v4
@@ -335,16 +497,28 @@ jobs:
flatpak install -y --user flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08
flatpak install -y --user flathub org.electronjs.Electron2.BaseApp//25.08
+ - name: Cache pip wheel cache
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/pip
+ key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-x64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-x64-3.12.8
+ key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-x64-
+ python-bundle-${{ runner.os }}-x64-3.12.8-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package Linux
run: |
@@ -352,6 +526,9 @@ jobs:
cd apps/frontend && npm run package:linux -- --config.extraMetadata.version="$VERSION"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
diff --git a/.github/workflows/pr-auto-label.yml b/.github/workflows/pr-auto-label.yml
deleted file mode 100644
index ac6775e7b8..0000000000
--- a/.github/workflows/pr-auto-label.yml
+++ /dev/null
@@ -1,227 +0,0 @@
-name: PR Auto Label
-
-on:
- pull_request:
- types: [opened, synchronize, reopened]
-
-# Cancel in-progress runs for the same PR
-concurrency:
- group: pr-auto-label-${{ github.event.pull_request.number }}
- cancel-in-progress: true
-
-permissions:
- contents: read
- pull-requests: write
-
-jobs:
- label:
- name: Auto Label PR
- runs-on: ubuntu-latest
- # Don't run on fork PRs (they can't write labels)
- if: github.event.pull_request.head.repo.full_name == github.repository
- timeout-minutes: 5
- steps:
- - name: Auto-label PR
- uses: actions/github-script@v7
- with:
- retries: 3
- retry-exempt-status-codes: 400,401,403,404,422
- script: |
- const { owner, repo } = context.repo;
- const pr = context.payload.pull_request;
- const prNumber = pr.number;
- const title = pr.title;
-
- console.log(`::group::PR #${prNumber} - Auto-labeling`);
- console.log(`Title: ${title}`);
-
- const labelsToAdd = new Set();
- const labelsToRemove = new Set();
-
- // ═══════════════════════════════════════════════════════════════
- // TYPE LABELS (from PR title - Conventional Commits)
- // ═══════════════════════════════════════════════════════════════
- const typeMap = {
- 'feat': 'feature',
- 'fix': 'bug',
- 'docs': 'documentation',
- 'refactor': 'refactor',
- 'test': 'test',
- 'ci': 'ci',
- 'chore': 'chore',
- 'perf': 'performance',
- 'style': 'style',
- 'build': 'build'
- };
-
- const typeMatch = title.match(/^(\w+)(\(.+?\))?(!)?:/);
- if (typeMatch) {
- const type = typeMatch[1].toLowerCase();
- const isBreaking = typeMatch[3] === '!';
-
- if (typeMap[type]) {
- labelsToAdd.add(typeMap[type]);
- console.log(` 📝 Type: ${type} → ${typeMap[type]}`);
- }
-
- if (isBreaking) {
- labelsToAdd.add('breaking-change');
- console.log(` ⚠️ Breaking change detected`);
- }
- } else {
- console.log(` ⚠️ No conventional commit prefix found in title`);
- }
-
- // ═══════════════════════════════════════════════════════════════
- // AREA LABELS (from changed files)
- // ═══════════════════════════════════════════════════════════════
- let files = [];
- try {
- const { data } = await github.rest.pulls.listFiles({
- owner,
- repo,
- pull_number: prNumber,
- per_page: 100
- });
- files = data;
- } catch (e) {
- console.log(` ⚠️ Could not fetch files: ${e.message}`);
- }
-
- const areas = {
- frontend: false,
- backend: false,
- ci: false,
- docs: false,
- tests: false
- };
-
- for (const file of files) {
- const path = file.filename;
- if (path.startsWith('apps/frontend/')) areas.frontend = true;
- if (path.startsWith('apps/backend/')) areas.backend = true;
- if (path.startsWith('.github/')) areas.ci = true;
- if (path.endsWith('.md') || path.startsWith('docs/')) areas.docs = true;
- if (path.startsWith('tests/') || path.includes('.test.') || path.includes('.spec.')) areas.tests = true;
- }
-
- // Determine area label (mutually exclusive)
- const areaLabels = ['area/frontend', 'area/backend', 'area/fullstack', 'area/ci'];
-
- if (areas.frontend && areas.backend) {
- labelsToAdd.add('area/fullstack');
- areaLabels.filter(l => l !== 'area/fullstack').forEach(l => labelsToRemove.add(l));
- console.log(` 📁 Area: fullstack (${files.length} files)`);
- } else if (areas.frontend) {
- labelsToAdd.add('area/frontend');
- areaLabels.filter(l => l !== 'area/frontend').forEach(l => labelsToRemove.add(l));
- console.log(` 📁 Area: frontend (${files.length} files)`);
- } else if (areas.backend) {
- labelsToAdd.add('area/backend');
- areaLabels.filter(l => l !== 'area/backend').forEach(l => labelsToRemove.add(l));
- console.log(` 📁 Area: backend (${files.length} files)`);
- } else if (areas.ci) {
- labelsToAdd.add('area/ci');
- areaLabels.filter(l => l !== 'area/ci').forEach(l => labelsToRemove.add(l));
- console.log(` 📁 Area: ci (${files.length} files)`);
- }
-
- // ═══════════════════════════════════════════════════════════════
- // SIZE LABELS (from lines changed)
- // ═══════════════════════════════════════════════════════════════
- const additions = pr.additions || 0;
- const deletions = pr.deletions || 0;
- const totalLines = additions + deletions;
-
- const sizeLabels = ['size/XS', 'size/S', 'size/M', 'size/L', 'size/XL'];
- let sizeLabel;
-
- if (totalLines < 10) sizeLabel = 'size/XS';
- else if (totalLines < 100) sizeLabel = 'size/S';
- else if (totalLines < 500) sizeLabel = 'size/M';
- else if (totalLines < 1000) sizeLabel = 'size/L';
- else sizeLabel = 'size/XL';
-
- labelsToAdd.add(sizeLabel);
- sizeLabels.filter(l => l !== sizeLabel).forEach(l => labelsToRemove.add(l));
- console.log(` 📏 Size: ${sizeLabel} (+${additions}/-${deletions} = ${totalLines} lines)`);
-
- console.log('::endgroup::');
-
- // ═══════════════════════════════════════════════════════════════
- // APPLY LABELS
- // ═══════════════════════════════════════════════════════════════
- console.log(`::group::Applying labels`);
-
- // Remove old labels (in parallel)
- const removeArray = [...labelsToRemove].filter(l => !labelsToAdd.has(l));
- if (removeArray.length > 0) {
- const removePromises = removeArray.map(async (label) => {
- try {
- await github.rest.issues.removeLabel({
- owner,
- repo,
- issue_number: prNumber,
- name: label
- });
- console.log(` ✓ Removed: ${label}`);
- } catch (e) {
- if (e.status !== 404) {
- console.log(` ⚠ Could not remove ${label}: ${e.message}`);
- }
- }
- });
- await Promise.all(removePromises);
- }
-
- // Add new labels
- const addArray = [...labelsToAdd];
- if (addArray.length > 0) {
- try {
- await github.rest.issues.addLabels({
- owner,
- repo,
- issue_number: prNumber,
- labels: addArray
- });
- console.log(` ✓ Added: ${addArray.join(', ')}`);
- } catch (e) {
- // Some labels might not exist
- if (e.status === 404) {
- core.warning(`Some labels do not exist. Please create them in repository settings.`);
- // Try adding one by one
- for (const label of addArray) {
- try {
- await github.rest.issues.addLabels({
- owner,
- repo,
- issue_number: prNumber,
- labels: [label]
- });
- } catch (e2) {
- console.log(` ⚠ Label '${label}' does not exist`);
- }
- }
- } else {
- throw e;
- }
- }
- }
-
- console.log('::endgroup::');
-
- // Summary
- console.log(`✅ PR #${prNumber} labeled: ${addArray.join(', ')}`);
-
- // Write job summary
- core.summary
- .addHeading(`PR #${prNumber} Auto-Labels`, 3)
- .addTable([
- [{data: 'Category', header: true}, {data: 'Label', header: true}],
- ['Type', typeMatch ? typeMap[typeMatch[1].toLowerCase()] || 'none' : 'none'],
- ['Area', areas.frontend && areas.backend ? 'fullstack' : areas.frontend ? 'frontend' : areas.backend ? 'backend' : 'other'],
- ['Size', sizeLabel]
- ])
- .addRaw(`\n**Files changed:** ${files.length}\n`)
- .addRaw(`**Lines:** +${additions} / -${deletions}\n`);
- await core.summary.write();
diff --git a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml
new file mode 100644
index 0000000000..989eaec525
--- /dev/null
+++ b/.github/workflows/pr-labeler.yml
@@ -0,0 +1,320 @@
+name: PR Labeler
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+
+concurrency:
+ group: pr-labeler-${{ github.event.pull_request.number }}
+ cancel-in-progress: true
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ label:
+ name: Auto Label PR
+ runs-on: ubuntu-latest
+ # Security: Prevent fork PRs from modifying labels (they don't have write access)
+ if: github.event.pull_request.head.repo.full_name == github.repository
+ timeout-minutes: 5
+
+ steps:
+ - name: Label PR
+ uses: actions/github-script@v7
+ with:
+ retries: 3
+ retry-exempt-status-codes: 400,401,403,404,422
+ script: |
+ // ═══════════════════════════════════════════════════════════════
+ // CONFIGURATION - Single source of truth for all settings
+ // ═══════════════════════════════════════════════════════════════
+
+ const CONFIG = {
+ // Size thresholds (lines changed)
+ SIZE_THRESHOLDS: {
+ XS: 10,
+ S: 100,
+ M: 500,
+ L: 1000
+ },
+
+ // Conventional commit type mappings
+ TYPE_MAP: Object.freeze({
+ 'feat': 'feature',
+ 'fix': 'bug',
+ 'docs': 'documentation',
+ 'refactor': 'refactor',
+ 'test': 'test',
+ 'ci': 'ci',
+ 'chore': 'chore',
+ 'perf': 'performance',
+ 'style': 'style',
+ 'build': 'build'
+ }),
+
+ // Area detection paths
+ AREA_PATHS: Object.freeze({
+ frontend: 'apps/frontend/',
+ backend: 'apps/backend/',
+ ci: '.github/'
+ }),
+
+ // Label definitions
+ LABELS: Object.freeze({
+ SIZE: ['size/XS', 'size/S', 'size/M', 'size/L', 'size/XL'],
+ AREA: ['area/frontend', 'area/backend', 'area/fullstack', 'area/ci'],
+ STATUS: ['🔄 Checking', '✅ Ready for Review', '❌ Checks Failed'],
+ REVIEW: ['Missing AC Approval', 'AC: Approved', 'AC: Changes Requested', 'AC: Needs Re-review']
+ }),
+
+ // Pagination
+ MAX_FILES_PER_PAGE: 100
+ };
+
+ // ═══════════════════════════════════════════════════════════════
+ // HELPER FUNCTIONS - Small, focused, single responsibility
+ // ═══════════════════════════════════════════════════════════════
+
+ /**
+ * Safely parse conventional commit type from PR title
+ * @param {string} title - PR title
+ * @returns {{type: string|null, isBreaking: boolean}}
+ */
+ function parseConventionalCommit(title) {
+ if (!title || typeof title !== 'string') {
+ return { type: null, isBreaking: false };
+ }
+
+ // Limit input length to prevent ReDoS attacks
+ const safeTitle = title.slice(0, 200);
+ const match = safeTitle.match(/^(\w{1,20})(\([^)]{0,50}\))?(!)?:/);
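+ // e.g. "feat(ui)!: redesign sidebar" → type "feat", isBreaking true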
+
+ if (!match) {
+ return { type: null, isBreaking: false };
+ }
+
+ return {
+ type: match[1].toLowerCase(),
+ isBreaking: match[3] === '!'
+ };
+ }
+
+ /**
+ * Determine size label based on lines changed
+ * @param {number} totalLines - Total lines changed
+ * @returns {string} Size label
+ */
+ function determineSizeLabel(totalLines) {
+ const { SIZE_THRESHOLDS } = CONFIG;
+
+ if (totalLines < SIZE_THRESHOLDS.XS) return 'size/XS';
+ if (totalLines < SIZE_THRESHOLDS.S) return 'size/S';
+ if (totalLines < SIZE_THRESHOLDS.M) return 'size/M';
+ if (totalLines < SIZE_THRESHOLDS.L) return 'size/L';
+ return 'size/XL';
+ }
+
+ /**
+ * Detect areas affected by file changes
+ * @param {Array} files - List of changed files
+ * @returns {{frontend: boolean, backend: boolean, ci: boolean}}
+ */
+ function detectAreas(files) {
+ const areas = { frontend: false, backend: false, ci: false };
+ const { AREA_PATHS } = CONFIG;
+
+ for (const file of files) {
+ const path = file.filename || '';
+ if (path.startsWith(AREA_PATHS.frontend)) areas.frontend = true;
+ if (path.startsWith(AREA_PATHS.backend)) areas.backend = true;
+ if (path.startsWith(AREA_PATHS.ci)) areas.ci = true;
+ }
+
+ return areas;
+ }
+
+ /**
+ * Determine area label based on detected areas
+ * @param {{frontend: boolean, backend: boolean, ci: boolean}} areas
+ * @returns {string|null} Area label or null
+ */
+ function determineAreaLabel(areas) {
+ if (areas.frontend && areas.backend) return 'area/fullstack';
+ if (areas.frontend) return 'area/frontend';
+ if (areas.backend) return 'area/backend';
+ if (areas.ci) return 'area/ci';
+ return null;
+ }
+
+ /**
+ * Remove labels from PR (with error handling)
+ * @param {Array} labels - Labels to remove
+ * @param {number} prNumber - PR number
+ */
+ async function removeLabels(labels, prNumber) {
+ const { owner, repo } = context.repo;
+
+ await Promise.allSettled(labels.map(async (label) => {
+ try {
+ await github.rest.issues.removeLabel({
+ owner,
+ repo,
+ issue_number: prNumber,
+ name: label
+ });
+ console.log(` ✓ Removed: ${label}`);
+ } catch (e) {
+ // 404 means label wasn't present - that's fine
+ if (e.status !== 404) {
+ console.log(` ⚠ Failed to remove ${label}: ${e.message}`);
+ }
+ }
+ }));
+ }
+
+ /**
+ * Add labels to PR (with error handling)
+ * @param {Array} labels - Labels to add
+ * @param {number} prNumber - PR number
+ */
+ async function addLabels(labels, prNumber) {
+ if (labels.length === 0) return;
+
+ const { owner, repo } = context.repo;
+
+ try {
+ await github.rest.issues.addLabels({
+ owner,
+ repo,
+ issue_number: prNumber,
+ labels
+ });
+ console.log(` ✓ Added: ${labels.join(', ')}`);
+ } catch (e) {
+ if (e.status === 404) {
+ core.warning(`One or more labels do not exist. Create them in repository settings.`);
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ /**
+ * Fetch PR files with full pagination support
+ * @param {number} prNumber - PR number
+ * @returns {Array} List of all files (paginated)
+ */
+ async function fetchPRFiles(prNumber) {
+ const { owner, repo } = context.repo;
+
+ try {
+ // Use paginate to fetch ALL files, not just first 100
+ const files = await github.paginate(
+ github.rest.pulls.listFiles,
+ { owner, repo, pull_number: prNumber, per_page: CONFIG.MAX_FILES_PER_PAGE }
+ );
+ return files;
+ } catch (e) {
+ console.log(` ⚠ Could not fetch files: ${e.message}`);
+ return [];
+ }
+ }
+
+ // ═══════════════════════════════════════════════════════════════
+ // MAIN LOGIC - Orchestrates the labeling process
+ // ═══════════════════════════════════════════════════════════════
+
+ const { owner, repo } = context.repo;
+ const pr = context.payload.pull_request;
+ const prNumber = pr.number;
+ const title = pr.title || '';
+ const isNewPR = context.payload.action === 'opened' || context.payload.action === 'reopened';
+
+ console.log(`::group::PR #${prNumber} - Auto-labeling`);
+ console.log(`Title: ${title.slice(0, 100)}${title.length > 100 ? '...' : ''}`);
+ console.log(`Action: ${context.payload.action}`);
+
+ const labelsToAdd = new Set();
+ const labelsToRemove = new Set();
+
+ // 1. Parse conventional commit type
+ const { type, isBreaking } = parseConventionalCommit(title);
+ if (type && CONFIG.TYPE_MAP[type]) {
+ labelsToAdd.add(CONFIG.TYPE_MAP[type]);
+ console.log(` 📝 Type: ${type} → ${CONFIG.TYPE_MAP[type]}`);
+ } else {
+ console.log(` ℹ️ No conventional commit prefix detected`);
+ }
+
+ if (isBreaking) {
+ labelsToAdd.add('breaking-change');
+ console.log(` ⚠️ Breaking change detected`);
+ }
+
+ // 2. Detect areas from changed files
+ const files = await fetchPRFiles(prNumber);
+ const areas = detectAreas(files);
+ const areaLabel = determineAreaLabel(areas);
+
+ if (areaLabel) {
+ labelsToAdd.add(areaLabel);
+ CONFIG.LABELS.AREA.filter(l => l !== areaLabel).forEach(l => labelsToRemove.add(l));
+ console.log(` 📁 Area: ${areaLabel.replace('area/', '')}`);
+ }
+
+ // 3. Calculate size label
+ const totalLines = (pr.additions || 0) + (pr.deletions || 0);
+ const sizeLabel = determineSizeLabel(totalLines);
+ labelsToAdd.add(sizeLabel);
+ CONFIG.LABELS.SIZE.filter(l => l !== sizeLabel).forEach(l => labelsToRemove.add(l));
+ console.log(` 📏 Size: ${sizeLabel} (${totalLines} lines)`);
+
+ // 4. Set status label (only on new PRs - let pr-status-gate handle updates on pushes)
+ // Note: On synchronize events, CI workflows will trigger pr-status-gate when they complete
+ if (isNewPR) {
+ labelsToAdd.add('🔄 Checking');
+ CONFIG.LABELS.STATUS.filter(l => l !== '🔄 Checking').forEach(l => labelsToRemove.add(l));
+ console.log(` 🔄 Status: Checking`);
+ } else {
+ console.log(` ℹ️ Status: Unchanged (will be updated by pr-status-gate)`);
+ }
+
+ // 5. Add review label for new PRs only
+ if (isNewPR) {
+ labelsToAdd.add('Missing AC Approval');
+ console.log(` ⏳ Review: Missing AC Approval`);
+ }
+
+ console.log('::endgroup::');
+
+ // 6. Apply label changes
+ console.log(`::group::Applying labels`);
+
+ // Remove labels that should be replaced (exclude ones we're adding)
+ const removeList = [...labelsToRemove].filter(l => !labelsToAdd.has(l));
+ await removeLabels(removeList, prNumber);
+
+ // Add new labels
+ await addLabels([...labelsToAdd], prNumber);
+
+ console.log('::endgroup::');
+ console.log(`✅ PR #${prNumber} labeled successfully`);
+
+ // 7. Write job summary
+ const summaryType = type ? CONFIG.TYPE_MAP[type] || 'unknown' : 'none';
+ const summaryArea = areaLabel ? areaLabel.replace('area/', '') : 'other';
+
+ await core.summary
+ .addHeading(`PR #${prNumber} Auto-Labels`, 3)
+ .addTable([
+ [{ data: 'Category', header: true }, { data: 'Label', header: true }],
+ ['Type', summaryType],
+ ['Area', summaryArea],
+ ['Size', sizeLabel],
+ ['Status', isNewPR ? '🔄 Checking' : '(unchanged)'],
+ ['Review', isNewPR ? 'Missing AC Approval' : '(unchanged)']
+ ])
+ .addRaw(`\n**Files:** ${files.length} | **Lines:** +${pr.additions || 0} / -${pr.deletions || 0}\n`)
+ .write();
diff --git a/.github/workflows/pr-status-check.yml b/.github/workflows/pr-status-check.yml
deleted file mode 100644
index 95c6239e94..0000000000
--- a/.github/workflows/pr-status-check.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-name: PR Status Check
-
-on:
- pull_request:
- types: [opened, synchronize, reopened]
-
-# Cancel in-progress runs for the same PR
-concurrency:
- group: pr-status-${{ github.event.pull_request.number }}
- cancel-in-progress: true
-
-permissions:
- pull-requests: write
-
-jobs:
- mark-checking:
- name: Set Checking Status
- runs-on: ubuntu-latest
- # Don't run on fork PRs (they can't write labels)
- if: github.event.pull_request.head.repo.full_name == github.repository
- timeout-minutes: 5
- steps:
- - name: Update PR status label
- uses: actions/github-script@v7
- with:
- retries: 3
- retry-exempt-status-codes: 400,401,403,404,422
- script: |
- const { owner, repo } = context.repo;
- const prNumber = context.payload.pull_request.number;
- const statusLabels = ['🔄 Checking', '✅ Ready for Review', '❌ Checks Failed'];
-
- console.log(`::group::PR #${prNumber} - Setting status to Checking`);
-
- // Remove old status labels (parallel for speed)
- const removePromises = statusLabels.map(async (label) => {
- try {
- await github.rest.issues.removeLabel({
- owner,
- repo,
- issue_number: prNumber,
- name: label
- });
- console.log(` ✓ Removed: ${label}`);
- } catch (e) {
- if (e.status !== 404) {
- console.log(` ⚠ Could not remove ${label}: ${e.message}`);
- }
- }
- });
-
- await Promise.all(removePromises);
-
- // Add checking label
- try {
- await github.rest.issues.addLabels({
- owner,
- repo,
- issue_number: prNumber,
- labels: ['🔄 Checking']
- });
- console.log(` ✓ Added: 🔄 Checking`);
- } catch (e) {
- // Label might not exist - create helpful error
- if (e.status === 404) {
- core.warning(`Label '🔄 Checking' does not exist. Please create it in repository settings.`);
- }
- throw e;
- }
-
- console.log('::endgroup::');
- console.log(`✅ PR #${prNumber} marked as checking`);
diff --git a/.github/workflows/pr-status-gate.yml b/.github/workflows/pr-status-gate.yml
index b28b896d2b..69cb9bd593 100644
--- a/.github/workflows/pr-status-gate.yml
+++ b/.github/workflows/pr-status-gate.yml
@@ -5,187 +5,581 @@ on:
workflows: [CI, Lint, Quality Security]
types: [completed]
+ issue_comment:
+ types: [created, edited]
+
+ pull_request:
+ types: [synchronize]
+
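+# Serialize runs per PR: the group key falls back through the workflow_run, issue_comment, and pull_request payloads, then run_id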
+concurrency:
+ group: pr-status-gate-${{ github.event.workflow_run.pull_requests[0].number || github.event.issue.number || github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
permissions:
pull-requests: write
checks: read
+env:
+ # Shared configuration - single source of truth
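+ # Check names use the "{Workflow name} / {Job name}" form shown in the PR Checks tab; update this list when jobs are added or renamed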
+ REQUIRED_CHECKS: |
+ CI / test-frontend
+ CI / test-python (3.12)
+ CI / test-python (3.13)
+ Lint / python
+ Quality Security / CodeQL (javascript-typescript)
+ Quality Security / CodeQL (python)
+ Quality Security / Python Security (Bandit)
+ Quality Security / Security Summary
+
jobs:
- update-status:
- name: Update PR Status
+ # ═══════════════════════════════════════════════════════════════════════════
+ # JOB 1: CI STATUS (triggered by workflow_run)
+ # Updates CI status labels when monitored workflows complete
+ # ═══════════════════════════════════════════════════════════════════════════
+ update-ci-status:
+ name: Update CI Status
runs-on: ubuntu-latest
- # Only run if this workflow_run is associated with a PR
- if: github.event.workflow_run.pull_requests[0] != null
+ if: github.event_name == 'workflow_run' && github.event.workflow_run.pull_requests[0] != null
timeout-minutes: 5
+
steps:
- name: Check all required checks and update label
uses: actions/github-script@v7
+ env:
+ REQUIRED_CHECKS: ${{ env.REQUIRED_CHECKS }}
with:
retries: 3
retry-exempt-status-codes: 400,401,403,404,422
script: |
- const { owner, repo } = context.repo;
+ // NOTE: STATUS_LABELS is intentionally duplicated across jobs.
+ // GitHub Actions jobs run in isolated contexts and cannot share runtime constants.
+ // If label values change, update ALL occurrences: update-ci-status, check-status-command
+ const STATUS_LABELS = Object.freeze({
+ CHECKING: '🔄 Checking',
+ PASSED: '✅ Ready for Review',
+ FAILED: '❌ Checks Failed'
+ });
+
+ const REQUIRED_CHECKS = process.env.REQUIRED_CHECKS
+ .split('\n')
+ .map(s => s.trim())
+ .filter(Boolean);
+
+ async function fetchCheckRuns(sha) {
+ const { owner, repo } = context.repo;
+ // Let the configured retries (retries: 3) handle transient failures
+ // Don't catch errors - allow them to propagate for retry logic
+ const checkRuns = await github.paginate(
+ github.rest.checks.listForRef,
+ { owner, repo, ref: sha, per_page: 100 },
+ (response) => response.data
+ );
+ return checkRuns;
+ }
+
+ function analyzeChecks(checkRuns) {
+ const results = [];
+ let allComplete = true;
+ let anyFailed = false;
+
+ for (const checkName of REQUIRED_CHECKS) {
+ const check = checkRuns.find(c => c.name === checkName);
+
+ if (!check) {
+ results.push({ name: checkName, status: '⏳ Pending', complete: false });
+ allComplete = false;
+ } else if (check.status !== 'completed') {
+ results.push({ name: checkName, status: '🔄 Running', complete: false });
+ allComplete = false;
+ } else if (check.conclusion === 'success') {
+ results.push({ name: checkName, status: '✅ Passed', complete: true });
+ } else if (check.conclusion === 'skipped') {
+ results.push({ name: checkName, status: '⏭️ Skipped', complete: true, skipped: true });
+ } else {
+ results.push({ name: checkName, status: '❌ Failed', complete: true, failed: true });
+ anyFailed = true;
+ }
+ }
+ return { allComplete, anyFailed, results };
+ }
+
+ async function updateStatusLabels(prNumber, newLabel) {
+ const { owner, repo } = context.repo;
+ const allLabels = Object.values(STATUS_LABELS);
+
+ // Remove all status labels first - throw on non-404 errors to prevent conflicting labels
+ for (const label of allLabels) {
+ try {
+ await github.rest.issues.removeLabel({ owner, repo, issue_number: prNumber, name: label });
+ } catch (e) {
+ if (e && e.status !== 404) {
+ // Throw to prevent adding new label if removal failed (could cause conflicting labels)
+ throw new Error(`Failed to remove label '${label}': ${e.message}`);
+ }
+ }
+ }
+
+ try {
+ await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: [newLabel] });
+ } catch (e) {
+ if (e && e.status === 404) {
+ core.warning(`Label '${newLabel}' does not exist`);
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ // Main logic
const prNumber = context.payload.workflow_run.pull_requests[0].number;
const headSha = context.payload.workflow_run.head_sha;
const triggerWorkflow = context.payload.workflow_run.name;
- // ═══════════════════════════════════════════════════════════════════════
- // REQUIRED CHECK RUNS - Job-level checks (not workflow-level)
- // ═══════════════════════════════════════════════════════════════════════
- // Format: "{Workflow Name} / {Job Name}" or "{Workflow Name} / {Job Custom Name}"
- //
- // To find check names: Go to PR → Checks tab → copy exact name
- // To update: Edit this list when workflow jobs are added/renamed/removed
- //
- // Last validated: 2026-01-02
- // ═══════════════════════════════════════════════════════════════════════
- const requiredChecks = [
- // CI workflow (ci.yml) - 3 checks
- 'CI / test-frontend',
- 'CI / test-python (3.12)',
- 'CI / test-python (3.13)',
- // Lint workflow (lint.yml) - 1 check
- 'Lint / python',
- // Quality Security workflow (quality-security.yml) - 4 checks
- 'Quality Security / CodeQL (javascript-typescript)',
- 'Quality Security / CodeQL (python)',
- 'Quality Security / Python Security (Bandit)',
- 'Quality Security / Security Summary'
- ];
+ console.log(`PR #${prNumber} - Triggered by: ${triggerWorkflow}, SHA: ${headSha.slice(0, 8)}`);
- const statusLabels = {
- checking: '🔄 Checking',
- passed: '✅ Ready for Review',
- failed: '❌ Checks Failed'
- };
+ const checkRuns = await fetchCheckRuns(headSha);
+ console.log(`Found ${checkRuns.length} check runs`);
+ const { allComplete, anyFailed, results } = analyzeChecks(checkRuns);
- console.log(`::group::PR #${prNumber} - Checking required checks`);
- console.log(`Triggered by: ${triggerWorkflow}`);
- console.log(`Head SHA: ${headSha}`);
- console.log(`Required checks: ${requiredChecks.length}`);
- console.log('');
+ for (const r of results) {
+ console.log(` ${r.status} ${r.name}`);
+ }
- // Fetch all check runs for this commit
- let allCheckRuns = [];
- try {
- const { data } = await github.rest.checks.listForRef({
- owner,
- repo,
- ref: headSha,
- per_page: 100
- });
- allCheckRuns = data.check_runs;
- console.log(`Found ${allCheckRuns.length} total check runs`);
- } catch (error) {
- // Add warning annotation so maintainers are alerted
- core.warning(`Failed to fetch check runs for PR #${prNumber}: ${error.message}. PR label may be outdated.`);
- console.log(`::error::Failed to fetch check runs: ${error.message}`);
- console.log('::endgroup::');
+ if (!allComplete) {
+ const pending = results.filter(r => !r.complete).length;
+ console.log(`⏳ ${pending}/${REQUIRED_CHECKS.length} checks pending`);
+ // Update to CHECKING status if checks are still running (prevents stale Ready/Failed status)
+ await updateStatusLabels(prNumber, STATUS_LABELS.CHECKING);
return;
}
+ const newLabel = anyFailed ? STATUS_LABELS.FAILED : STATUS_LABELS.PASSED;
+ await updateStatusLabels(prNumber, newLabel);
+
+ const passedCount = results.filter(r => r.status === '✅ Passed').length;
+ const failedCount = results.filter(r => r.failed).length;
+
+ if (anyFailed) {
+ console.log(`❌ PR #${prNumber}: ${failedCount} check(s) failed`);
+ } else {
+ console.log(`✅ PR #${prNumber}: Ready for review (${passedCount}/${REQUIRED_CHECKS.length} passed)`);
+ }
+
+ # ═══════════════════════════════════════════════════════════════════════════
+ # JOB 2: /check-status COMMAND
+ # Manual status check - anyone can trigger by commenting /check-status
+ # ═══════════════════════════════════════════════════════════════════════════
+ check-status-command:
+ name: Check Status Command
+ runs-on: ubuntu-latest
+ if: |
+ github.event_name == 'issue_comment' &&
+ github.event.issue.pull_request &&
+ contains(github.event.comment.body, '/check-status')
+ timeout-minutes: 5
+
+ steps:
+ - name: Run status check and post report
+ uses: actions/github-script@v7
+ env:
+ REQUIRED_CHECKS: ${{ env.REQUIRED_CHECKS }}
+ with:
+ retries: 3
+ retry-exempt-status-codes: 400,401,403,404,422
+ script: |
+ // NOTE: STATUS_LABELS is intentionally duplicated across jobs.
+ // GitHub Actions jobs run in isolated contexts and cannot share runtime constants.
+ // If label values change, update ALL occurrences: update-ci-status, check-status-command
+ const STATUS_LABELS = Object.freeze({
+ CHECKING: '🔄 Checking',
+ PASSED: '✅ Ready for Review',
+ FAILED: '❌ Checks Failed'
+ });
+
+ // NOTE: REVIEW_LABELS is intentionally duplicated across jobs.
+ // If label values change, update ALL occurrences: check-status-command, update-review-status
+ const REVIEW_LABELS = Object.freeze([
+ 'Missing AC Approval',
+ 'AC: Approved',
+ 'AC: Changes Requested',
+ 'AC: Blocked',
+ 'AC: Needs Re-review',
+ 'AC: Reviewed'
+ ]);
+
+ const REQUIRED_CHECKS = process.env.REQUIRED_CHECKS
+ .split('\n')
+ .map(s => s.trim())
+ .filter(Boolean);
+
+ const { owner, repo } = context.repo;
+ const prNumber = context.payload.issue.number;
+ const requestedBy = context.payload.comment.user.login;
+
+ // Get PR details
+ const { data: pr } = await github.rest.pulls.get({
+ owner, repo, pull_number: prNumber
+ });
+ const headSha = pr.head.sha;
+
+ console.log(`PR #${prNumber} - /check-status by @${requestedBy}, SHA: ${headSha.slice(0, 8)}`);
+
+ // Fetch check runs with pagination to handle >100 checks
+ const checkRuns = await github.paginate(
+ github.rest.checks.listForRef,
+ { owner, repo, ref: headSha, per_page: 100 },
+ (response) => response.data
+ );
+ console.log(`Found ${checkRuns.length} check runs`);
+
+ // Analyze results
+ const results = [];
let allComplete = true;
let anyFailed = false;
- const results = [];
- // Check each required check
- for (const checkName of requiredChecks) {
- const check = allCheckRuns.find(c => c.name === checkName);
+ for (const checkName of REQUIRED_CHECKS) {
+ const check = checkRuns.find(c => c.name === checkName);
if (!check) {
- results.push({ name: checkName, status: '⏳ Pending', complete: false });
+ results.push({ name: checkName, emoji: '⏳', complete: false });
allComplete = false;
} else if (check.status !== 'completed') {
- results.push({ name: checkName, status: '🔄 Running', complete: false });
+ results.push({ name: checkName, emoji: '🔄', complete: false });
allComplete = false;
} else if (check.conclusion === 'success') {
- results.push({ name: checkName, status: '✅ Passed', complete: true });
+ results.push({ name: checkName, emoji: '✅', complete: true });
} else if (check.conclusion === 'skipped') {
- // Skipped checks are treated as passed (e.g., path filters, conditional jobs)
- results.push({ name: checkName, status: '⏭️ Skipped', complete: true, skipped: true });
+ results.push({ name: checkName, emoji: '⏭️', complete: true, skipped: true });
} else {
- results.push({ name: checkName, status: '❌ Failed', complete: true, failed: true });
+ results.push({ name: checkName, emoji: '❌', complete: true, failed: true });
anyFailed = true;
}
}
- // Print results table
- console.log('');
- console.log('Check Status:');
- console.log('─'.repeat(70));
- for (const r of results) {
- const shortName = r.name.length > 55 ? r.name.substring(0, 52) + '...' : r.name;
- console.log(` ${r.status.padEnd(12)} ${shortName}`);
+ // Get current labels
+ const { data: currentLabels } = await github.rest.issues.listLabelsOnIssue({
+ owner, repo, issue_number: prNumber
+ });
+ const labelNames = currentLabels.map(l => l.name);
+ const currentStatusLabel = Object.values(STATUS_LABELS).find(l => labelNames.includes(l)) || 'None';
+ const currentReviewLabel = REVIEW_LABELS.find(l => labelNames.includes(l)) || 'None';
+
+ // Update label if all checks complete
+ let newStatusLabel = STATUS_LABELS.CHECKING;
+ let statusChanged = false;
+
+ if (allComplete) {
+ newStatusLabel = anyFailed ? STATUS_LABELS.FAILED : STATUS_LABELS.PASSED;
+
+ if (newStatusLabel !== currentStatusLabel) {
+ statusChanged = true;
+ // Remove all status labels first - throw on non-404 errors to prevent conflicting labels
+ for (const label of Object.values(STATUS_LABELS)) {
+ try {
+ await github.rest.issues.removeLabel({ owner, repo, issue_number: prNumber, name: label });
+ } catch (e) {
+ if (e && e.status !== 404) {
+ throw new Error(`Failed to remove label '${label}': ${e.message}`);
+ }
+ }
+ }
+ await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: [newStatusLabel] });
+ }
}
- console.log('─'.repeat(70));
- console.log('::endgroup::');
- // Only update label if all required checks are complete
- if (!allComplete) {
- const pending = results.filter(r => !r.complete).length;
- console.log(`⏳ ${pending}/${requiredChecks.length} checks still pending - keeping current label`);
- return;
+ // Build status report
+ const passedCount = results.filter(r => r.emoji === '✅').length;
+ let statusEmoji = '🔄';
+ if (allComplete && !anyFailed) statusEmoji = '✅';
+ else if (allComplete && anyFailed) statusEmoji = '❌';
+
+ const checksTable = results.map(r => `| ${r.emoji} | ${r.name} |`).join('\n');
+
+ const lines = [
+ `## ${statusEmoji} PR Status Report`,
+ '',
+ `| Label | Value |`,
+ `|-------|-------|`,
+ `| CI Status | ${newStatusLabel} |`,
+ `| AC Review | ${currentReviewLabel} |`,
+ ''
+ ];
+
+ if (statusChanged) {
+ lines.push(`> Status updated: \`${currentStatusLabel}\` → \`${newStatusLabel}\``);
+ lines.push('');
}
- // Determine final label
- const newLabel = anyFailed ? statusLabels.failed : statusLabels.passed;
+ lines.push(`### CI Checks (${passedCount}/${REQUIRED_CHECKS.length} passed)`);
+ lines.push('');
+ lines.push('| Status | Check |');
+ lines.push('|--------|-------|');
+ lines.push(checksTable);
+ lines.push('');
+ lines.push('---');
+ lines.push(`Triggered by \`/check-status\` from @${requestedBy}`);
- console.log(`::group::Updating PR #${prNumber} label`);
+ await github.rest.issues.createComment({
+ owner, repo, issue_number: prNumber, body: lines.join('\n')
+ });
+
+ console.log(`✅ Posted status report to PR #${prNumber}`);
+
+ # ═══════════════════════════════════════════════════════════════════════════
+ # JOB 3: AUTO-CLAUDE REVIEW
+ # Processes Auto-Claude review comments from trusted sources
+ # Security: Only bots and collaborators can update labels
+ # ═══════════════════════════════════════════════════════════════════════════
+ update-review-status:
+ name: Update Review Status
+ runs-on: ubuntu-latest
+ if: |
+ github.event_name == 'issue_comment' &&
+ github.event.issue.pull_request &&
+ !contains(github.event.comment.body, '/check-status')
+ timeout-minutes: 5
+
+ steps:
+ - name: Check for Auto-Claude review
+ uses: actions/github-script@v7
+ with:
+ retries: 3
+ retry-exempt-status-codes: 400,401,403,404,422
+ script: |
+ // Security configuration
+ // SECURITY: Only [bot] suffixed accounts are protected by GitHub.
+ // Regular usernames can be registered by anyone and are NOT trusted.
+ const TRUSTED_BOT_ACCOUNTS = Object.freeze([
+ 'github-actions[bot]',
+ 'auto-claude[bot]'
+ ]);
+
+ const TRUSTED_AUTHOR_ASSOCIATIONS = Object.freeze([
+ 'COLLABORATOR',
+ 'MEMBER',
+ 'OWNER'
+ ]);
+
+ const IDENTIFIER_PATTERNS = Object.freeze([
+ '🤖 Auto Claude PR Review',
+ 'Auto Claude Review',
+ 'Auto-Claude Review'
+ ]);
+
+ // SECURITY: Regex patterns are tightened to prevent false matches
+ // Using \s* instead of .* and requiring specific emoji + verdict format
+ const VERDICTS = Object.freeze({
+ APPROVED: {
+ patterns: ['Auto Claude Review - APPROVED', '✅ Auto Claude Review - APPROVED'],
+ // Match: "Merge Verdict:" followed by whitespace/emoji, then ✅, then APPROVED/READY TO MERGE
+ regex: /Merge Verdict:\s*✅\s*(?:APPROVED|READY TO MERGE)/i,
+ label: 'AC: Approved'
+ },
+ CHANGES_REQUESTED: {
+ patterns: ['NEEDS REVISION', 'Needs Revision'],
+ // Match: "Merge Verdict:" followed by whitespace/emoji, then 🟠
+ regex: /Merge Verdict:\s*🟠/,
+ label: 'AC: Changes Requested'
+ },
+ BLOCKED: {
+ patterns: ['BLOCKED'],
+ // Match: "Merge Verdict:" followed by whitespace/emoji, then 🔴
+ regex: /Merge Verdict:\s*🔴/,
+ label: 'AC: Blocked'
+ }
+ });
+
+ // NOTE: REVIEW_LABELS is intentionally duplicated across jobs.
+ // GitHub Actions jobs run in isolated contexts and cannot share runtime constants.
+ // If label values change, update ALL occurrences: check-status-command, update-review-status
+ const REVIEW_LABELS = Object.freeze([
+ 'Missing AC Approval',
+ 'AC: Approved',
+ 'AC: Changes Requested',
+ 'AC: Blocked',
+ 'AC: Needs Re-review',
+ 'AC: Reviewed'
+ ]);
+
+ // Helper functions
+ // SECURITY: Verify both username AND account type to prevent spoofing
+ function isTrustedBot(username, userType) {
+ const isKnownBot = TRUSTED_BOT_ACCOUNTS.some(t => username.toLowerCase() === t.toLowerCase());
+ // Only trust if it's a known bot account AND GitHub confirms it's a Bot type
+ return isKnownBot && userType === 'Bot';
+ }
+
+ function isTrustedAssociation(assoc) {
+ return TRUSTED_AUTHOR_ASSOCIATIONS.includes(assoc);
+ }
+
+ function isAutoClaudeComment(body) {
+ return IDENTIFIER_PATTERNS.some(p => body.includes(p));
+ }
+
+ function parseVerdict(body) {
+ const safeBody = body.slice(0, 5000);
+ for (const [key, config] of Object.entries(VERDICTS)) {
+ const patternMatch = config.patterns.some(p => safeBody.includes(p));
+ const regexMatch = config.regex && config.regex.test(safeBody);
+ if (patternMatch || regexMatch) {
+ return { verdict: key, label: config.label };
+ }
+ }
+ return null;
+ }
+
+ async function updateReviewLabels(prNumber, newLabel) {
+ const { owner, repo } = context.repo;
+
+ // Remove all review labels first - throw on non-404 errors to prevent conflicting labels
+ for (const label of REVIEW_LABELS) {
+ try {
+ await github.rest.issues.removeLabel({ owner, repo, issue_number: prNumber, name: label });
+ console.log(` Removed: ${label}`);
+ } catch (e) {
+ if (e && e.status !== 404) {
+ // Throw to prevent adding new label if removal failed (could cause conflicting labels)
+ throw new Error(`Failed to remove label '${label}': ${e.message}`);
+ }
+ }
+ }
- // Remove old status labels
- for (const label of Object.values(statusLabels)) {
try {
- await github.rest.issues.removeLabel({
- owner,
- repo,
- issue_number: prNumber,
- name: label
- });
- console.log(` ✓ Removed: ${label}`);
+ await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: [newLabel] });
+ console.log(` Added: ${newLabel}`);
} catch (e) {
- if (e.status !== 404) {
- console.log(` ⚠ Could not remove ${label}: ${e.message}`);
+ if (e && e.status === 404) {
+ core.warning(`Label '${newLabel}' does not exist`);
+ } else {
+ throw e;
}
}
}
- // Add final status label
+ // Main logic
+ const prNumber = context.payload.issue.number;
+ const comment = context.payload.comment;
+ const commenter = comment.user.login;
+ const commenterType = comment.user.type;
+ const authorAssociation = comment.author_association;
+ const body = comment.body || '';
+
+ console.log(`PR #${prNumber} - Comment by: ${commenter} (type: ${commenterType}, assoc: ${authorAssociation})`);
+
+ // Security checks
+ // SECURITY: Bot status requires BOTH username match AND verified Bot type
+ const isBot = isTrustedBot(commenter, commenterType);
+ const isCollaborator = isTrustedAssociation(authorAssociation);
+ const isACComment = isAutoClaudeComment(body);
+
+ console.log(` Trusted bot: ${isBot}, Collaborator: ${isCollaborator}, AC comment: ${isACComment}`);
+
+ if (!isBot && !isCollaborator) {
+ console.log('Skipping: Not a trusted bot or collaborator');
+ return;
+ }
+
+ if (!isACComment) {
+ console.log('Skipping: Not an Auto-Claude comment');
+ return;
+ }
+
+ const verdictResult = parseVerdict(body);
+ if (!verdictResult) {
+ console.log('Skipping: Could not parse verdict');
+ return;
+ }
+
+ console.log(`Verdict: ${verdictResult.verdict} → ${verdictResult.label}`);
+ await updateReviewLabels(prNumber, verdictResult.label);
+ console.log(`✅ PR #${prNumber} review status updated`);
+
+ # ═══════════════════════════════════════════════════════════════════════════
+ # JOB 4: RE-REVIEW ON PUSH
+ # When new commits pushed after AC approval, require re-review
+ # ═══════════════════════════════════════════════════════════════════════════
+ require-re-review:
+ name: Require Re-review on Push
+ runs-on: ubuntu-latest
+ if: github.event_name == 'pull_request' && github.event.action == 'synchronize'
+ timeout-minutes: 5
+
+ steps:
+ - name: Check and reset AC approval if needed
+ uses: actions/github-script@v7
+ with:
+ retries: 3
+ retry-exempt-status-codes: 400,401,403,404,422
+ script: |
+ const { owner, repo } = context.repo;
+ const prNumber = context.payload.pull_request.number;
+ const pusher = context.payload.sender.login;
+
+ console.log(`PR #${prNumber} - New commits by: ${pusher}`);
+
+ // Get current labels
+ const { data: labels } = await github.rest.issues.listLabelsOnIssue({
+ owner, repo, issue_number: prNumber
+ });
+ const labelNames = labels.map(l => l.name);
+
+ // Check if PR was approved
+ const wasApproved = labelNames.includes('AC: Approved');
+
+ if (!wasApproved) {
+ console.log('PR was not AC-approved, no action needed');
+ return;
+ }
+
+ console.log('PR was AC-approved, resetting to require re-review');
+
+ // Remove AC: Approved - throw on non-404 errors to prevent conflicting labels
try {
- await github.rest.issues.addLabels({
- owner,
- repo,
- issue_number: prNumber,
- labels: [newLabel]
+ await github.rest.issues.removeLabel({
+ owner, repo, issue_number: prNumber, name: 'AC: Approved'
});
- console.log(` ✓ Added: ${newLabel}`);
+ console.log(' Removed: AC: Approved');
} catch (e) {
- if (e.status === 404) {
- core.warning(`Label '${newLabel}' does not exist. Please create it in repository settings.`);
+ if (e && e.status !== 404) {
+ // Throw to prevent adding 'AC: Needs Re-review' if removal failed (could cause conflicting labels)
+ core.error(`Failed to remove 'AC: Approved' label: ${e.message}`);
+ throw e;
}
- throw e;
}
- console.log('::endgroup::');
+ // Add AC: Needs Re-review
+ try {
+ await github.rest.issues.addLabels({
+ owner, repo, issue_number: prNumber, labels: ['AC: Needs Re-review']
+ });
+ console.log(' Added: AC: Needs Re-review');
+ } catch (e) {
+ if (e && e.status === 404) {
+ core.warning("Label 'AC: Needs Re-review' does not exist");
+ } else {
+ throw e;
+ }
+ }
- // Summary
- const passedCount = results.filter(r => r.status === '✅ Passed').length;
- const skippedCount = results.filter(r => r.skipped).length;
- const failedCount = results.filter(r => r.failed).length;
+ // Post notification comment
+ const commentLines = [
+ '## 🔄 Re-review Required',
+ '',
+ 'New commits were pushed after Auto-Claude approval.',
+ '',
+ '| Previous | Current |',
+ '|----------|---------|',
+ '| `AC: Approved` | `AC: Needs Re-review` |',
+ '',
+ 'Please run Auto-Claude review again or request a manual review.',
+ '',
+ '---',
+ `Triggered by push from @${pusher}`
+ ];
- if (anyFailed) {
- console.log(`❌ PR #${prNumber} has ${failedCount} failing check(s)`);
- core.summary.addRaw(`## ❌ PR #${prNumber} - Checks Failed\n\n`);
- core.summary.addRaw(`**${failedCount}** of **${requiredChecks.length}** required checks failed.\n\n`);
- } else {
- const skippedNote = skippedCount > 0 ? ` (${skippedCount} skipped)` : '';
- const totalSuccessful = passedCount + skippedCount;
- console.log(`✅ PR #${prNumber} is ready for review (${totalSuccessful}/${requiredChecks.length} checks succeeded${skippedNote})`);
- core.summary.addRaw(`## ✅ PR #${prNumber} - Ready for Review\n\n`);
- core.summary.addRaw(`All **${requiredChecks.length}** required checks succeeded${skippedNote}.\n\n`);
- }
+ await github.rest.issues.createComment({
+ owner, repo, issue_number: prNumber, body: commentLines.join('\n')
+ });
- // Add results to summary
- core.summary.addTable([
- [{data: 'Check', header: true}, {data: 'Status', header: true}],
- ...results.map(r => [r.name, r.status])
- ]);
- await core.summary.write();
+ console.log(`✅ Posted re-review notification to PR #${prNumber}`);
diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml
index d50940c188..ac10837861 100644
--- a/.github/workflows/prepare-release.yml
+++ b/.github/workflows/prepare-release.yml
@@ -1,8 +1,10 @@
name: Prepare Release
# Triggers when code is pushed to main (e.g., merging develop → main)
-# If package.json version is newer than the latest tag, creates a new tag
-# which then triggers the release.yml workflow
+# If package.json version is newer than the latest tag:
+# 1. Validates CHANGELOG.md has an entry for this version (FAILS if missing)
+# 2. Extracts release notes from CHANGELOG.md
+# 3. Creates a new tag which triggers release.yml
on:
push:
@@ -67,8 +69,122 @@ jobs:
echo "⏭️ No release needed (package version not newer than latest tag)"
fi
- - name: Create and push tag
+ # CRITICAL: Validate CHANGELOG.md has entry for this version BEFORE creating tag
+ - name: Validate and extract changelog
if: steps.check.outputs.should_release == 'true'
+ id: changelog
+ run: |
+ VERSION="${{ steps.check.outputs.new_version }}"
+ CHANGELOG_FILE="CHANGELOG.md"
+
+ echo "🔍 Validating CHANGELOG.md for version $VERSION..."
+
+ if [ ! -f "$CHANGELOG_FILE" ]; then
+ echo "::error::CHANGELOG.md not found! Please create CHANGELOG.md with release notes."
+ exit 1
+ fi
+
+ # Extract changelog section for this version
+ # Looks for "## X.Y.Z" header and captures until next "## " or "---" or end
+ CHANGELOG_CONTENT=$(awk -v ver="$VERSION" '
+ BEGIN { found=0; content="" }
+ /^## / {
+ if (found) exit
+ # Match version at start of header (e.g., "## 2.7.3 -" or "## 2.7.3")
+ if ($2 == ver || $2 ~ "^"ver"[[:space:]]*-") {
+ found=1
+ # Skip the header line itself, we will add our own
+ next
+ }
+ }
+ /^---$/ { if (found) exit }
+ found { content = content $0 "\n" }
+ END {
+ if (!found) {
+ print "NOT_FOUND"
+ exit 1
+ }
+ # Trim leading/trailing whitespace
+ gsub(/^[[:space:]]+|[[:space:]]+$/, "", content)
+ print content
+ }
+ ' "$CHANGELOG_FILE")
+
+ if [ "$CHANGELOG_CONTENT" = "NOT_FOUND" ] || [ -z "$CHANGELOG_CONTENT" ]; then
+ echo ""
+ echo "::error::═══════════════════════════════════════════════════════════════════════"
+ echo "::error:: CHANGELOG VALIDATION FAILED"
+ echo "::error::═══════════════════════════════════════════════════════════════════════"
+ echo "::error::"
+ echo "::error:: Version $VERSION not found in CHANGELOG.md!"
+ echo "::error::"
+ echo "::error:: Before releasing, please update CHANGELOG.md with an entry like:"
+ echo "::error::"
+ echo "::error:: ## $VERSION - Your Release Title"
+ echo "::error::"
+ echo "::error:: ### ✨ New Features"
+ echo "::error:: - Feature description"
+ echo "::error::"
+ echo "::error:: ### 🐛 Bug Fixes"
+ echo "::error:: - Fix description"
+ echo "::error::"
+ echo "::error::═══════════════════════════════════════════════════════════════════════"
+ echo ""
+
+ # Also add to job summary for visibility
+ echo "## ❌ Release Blocked: Missing Changelog" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "Version **$VERSION** was not found in CHANGELOG.md." >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### How to fix:" >> $GITHUB_STEP_SUMMARY
+ echo "1. Update CHANGELOG.md with release notes for version $VERSION" >> $GITHUB_STEP_SUMMARY
+ echo "2. Commit and push the changes" >> $GITHUB_STEP_SUMMARY
+ echo "3. The release will automatically retry" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### Expected format:" >> $GITHUB_STEP_SUMMARY
+ echo "\`\`\`markdown" >> $GITHUB_STEP_SUMMARY
+ echo "## $VERSION - Release Title" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### ✨ New Features" >> $GITHUB_STEP_SUMMARY
+ echo "- Feature description" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### 🐛 Bug Fixes" >> $GITHUB_STEP_SUMMARY
+ echo "- Fix description" >> $GITHUB_STEP_SUMMARY
+ echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+
+ exit 1
+ fi
+
+ echo "✅ Found changelog entry for version $VERSION"
+ echo ""
+ echo "--- Extracted Release Notes ---"
+ echo "$CHANGELOG_CONTENT"
+ echo "--- End Release Notes ---"
+
+ # Save changelog to file for artifact upload
+ echo "$CHANGELOG_CONTENT" > changelog-extract.md
+
+ # Also save to output (for short changelogs)
+ # Using heredoc for multiline output
+ {
+ echo "content<> $GITHUB_OUTPUT
+
+ echo "changelog_valid=true" >> $GITHUB_OUTPUT
+
+ # Upload changelog as artifact for release.yml to use
+ - name: Upload changelog artifact
+ if: steps.check.outputs.should_release == 'true' && steps.changelog.outputs.changelog_valid == 'true'
+ uses: actions/upload-artifact@v4
+ with:
+ name: changelog-${{ steps.check.outputs.new_version }}
+ path: changelog-extract.md
+ retention-days: 1
+
+ - name: Create and push tag
+ if: steps.check.outputs.should_release == 'true' && steps.changelog.outputs.changelog_valid == 'true'
run: |
VERSION="${{ steps.check.outputs.new_version }}"
TAG="v$VERSION"
@@ -85,17 +201,19 @@ jobs:
- name: Summary
run: |
- if [ "${{ steps.check.outputs.should_release }}" = "true" ]; then
+ if [ "${{ steps.check.outputs.should_release }}" = "true" ] && [ "${{ steps.changelog.outputs.changelog_valid }}" = "true" ]; then
echo "## 🚀 Release Triggered" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Version:** v${{ steps.check.outputs.new_version }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
+ echo "✅ Changelog validated and extracted from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
echo "The release workflow has been triggered and will:" >> $GITHUB_STEP_SUMMARY
echo "1. Build binaries for all platforms" >> $GITHUB_STEP_SUMMARY
- echo "2. Generate changelog from PRs" >> $GITHUB_STEP_SUMMARY
+ echo "2. Use changelog from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
echo "3. Create GitHub release" >> $GITHUB_STEP_SUMMARY
echo "4. Update README with new version" >> $GITHUB_STEP_SUMMARY
- else
+ elif [ "${{ steps.check.outputs.should_release }}" = "false" ]; then
echo "## ⏭️ No Release Needed" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Package version:** ${{ steps.package.outputs.version }}" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index c6b6ddc99c..6ca7f72858 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -46,16 +46,28 @@ jobs:
- name: Install Rust toolchain (for building native Python packages)
uses: dtolnay/rust-toolchain@stable
+ - name: Cache pip wheel cache (for compiled packages like real_ladybug)
+ uses: actions/cache@v4
+ with:
+ path: ~/Library/Caches/pip
+ key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-x64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-x64-3.12.8-rust
+ key: python-bundle-${{ runner.os }}-x64-3.12.8-rust-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-x64-
+ python-bundle-${{ runner.os }}-x64-3.12.8-rust-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package macOS (Intel)
run: cd apps/frontend && npm run package:mac -- --x64
@@ -63,6 +75,9 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: ${{ secrets.MAC_CERTIFICATE }}
CSC_KEY_PASSWORD: ${{ secrets.MAC_CERTIFICATE_PASSWORD }}
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Notarize macOS Intel app
env:
@@ -93,6 +108,8 @@ jobs:
path: |
apps/frontend/dist/*.dmg
apps/frontend/dist/*.zip
+ apps/frontend/dist/*.yml
+ apps/frontend/dist/*.blockmap
# Apple Silicon build on ARM64 runner for native compilation
build-macos-arm64:
@@ -123,16 +140,28 @@ jobs:
- name: Install dependencies
run: cd apps/frontend && npm ci
+ - name: Cache pip wheel cache
+ uses: actions/cache@v4
+ with:
+ path: ~/Library/Caches/pip
+ key: pip-wheel-${{ runner.os }}-arm64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-arm64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-arm64-3.12.8
+ key: python-bundle-${{ runner.os }}-arm64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-arm64-
+ python-bundle-${{ runner.os }}-arm64-3.12.8-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package macOS (Apple Silicon)
run: cd apps/frontend && npm run package:mac -- --arm64
@@ -140,6 +169,9 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: ${{ secrets.MAC_CERTIFICATE }}
CSC_KEY_PASSWORD: ${{ secrets.MAC_CERTIFICATE_PASSWORD }}
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Notarize macOS ARM64 app
env:
@@ -170,9 +202,17 @@ jobs:
path: |
apps/frontend/dist/*.dmg
apps/frontend/dist/*.zip
+ apps/frontend/dist/*.yml
+ apps/frontend/dist/*.blockmap
build-windows:
runs-on: windows-latest
+ permissions:
+ id-token: write # Required for OIDC authentication with Azure
+ contents: read
+ env:
+ # Job-level env so AZURE_CLIENT_ID is available for step-level if conditions
+ AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
steps:
- uses: actions/checkout@v4
@@ -200,23 +240,149 @@ jobs:
- name: Install dependencies
run: cd apps/frontend && npm ci
+ - name: Cache pip wheel cache
+ uses: actions/cache@v4
+ with:
+ path: ~\AppData\Local\pip\Cache
+ key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-x64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-x64-3.12.8
+ key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-x64-
+ python-bundle-${{ runner.os }}-x64-3.12.8-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package Windows
run: cd apps/frontend && npm run package:win
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- CSC_LINK: ${{ secrets.WIN_CERTIFICATE }}
- CSC_KEY_PASSWORD: ${{ secrets.WIN_CERTIFICATE_PASSWORD }}
+ # Disable electron-builder's built-in signing (we use Azure Trusted Signing instead)
+ CSC_IDENTITY_AUTO_DISCOVERY: false
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
+
+ - name: Azure Login (OIDC)
+ if: env.AZURE_CLIENT_ID != ''
+ uses: azure/login@v2
+ with:
+ client-id: ${{ secrets.AZURE_CLIENT_ID }}
+ tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+ subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+ - name: Sign Windows executable with Azure Trusted Signing
+ if: env.AZURE_CLIENT_ID != ''
+ uses: azure/trusted-signing-action@v0.5.11
+ with:
+ endpoint: https://neu.codesigning.azure.net/
+ trusted-signing-account-name: ${{ secrets.AZURE_SIGNING_ACCOUNT }}
+ certificate-profile-name: ${{ secrets.AZURE_CERTIFICATE_PROFILE }}
+ files-folder: apps/frontend/dist
+ files-folder-filter: exe
+ file-digest: SHA256
+ timestamp-rfc3161: http://timestamp.acs.microsoft.com
+ timestamp-digest: SHA256
+
+ - name: Verify Windows executable is signed
+ if: env.AZURE_CLIENT_ID != ''
+ shell: pwsh
+ run: |
+ cd apps/frontend/dist
+ $exeFile = Get-ChildItem -Filter "*.exe" | Select-Object -First 1
+ if ($exeFile) {
+ Write-Host "Verifying signature on $($exeFile.Name)..."
+ $sig = Get-AuthenticodeSignature -FilePath $exeFile.FullName
+ if ($sig.Status -ne 'Valid') {
+ Write-Host "::error::Signature verification failed: $($sig.Status)"
+ Write-Host "::error::Status Message: $($sig.StatusMessage)"
+ exit 1
+ }
+ Write-Host "✅ Signature verified successfully"
+ Write-Host " Subject: $($sig.SignerCertificate.Subject)"
+ Write-Host " Issuer: $($sig.SignerCertificate.Issuer)"
+ Write-Host " Thumbprint: $($sig.SignerCertificate.Thumbprint)"
+ } else {
+ Write-Host "::error::No .exe file found to verify"
+ exit 1
+ }
+
+ - name: Regenerate checksums after signing
+ if: env.AZURE_CLIENT_ID != ''
+ shell: pwsh
+ run: |
+ $ErrorActionPreference = "Stop"
+ cd apps/frontend/dist
+
+ # Find the installer exe (electron-builder names it with "Setup" or just the app name)
+ # electron-builder produces one installer exe per build
+ $exeFiles = Get-ChildItem -Filter "*.exe"
+ if ($exeFiles.Count -eq 0) {
+ Write-Host "::error::No .exe files found in dist folder"
+ exit 1
+ }
+
+ Write-Host "Found $($exeFiles.Count) exe file(s): $($exeFiles.Name -join ', ')"
+
+ $ymlFile = "latest.yml"
+ if (-not (Test-Path $ymlFile)) {
+ Write-Host "::error::$ymlFile not found - cannot update checksums"
+ exit 1
+ }
+
+ $content = Get-Content $ymlFile -Raw
+ $originalContent = $content
+
+ # Process each exe file and update its hash in latest.yml
+ foreach ($exeFile in $exeFiles) {
+ Write-Host "Processing $($exeFile.Name)..."
+
+ # Compute SHA512 hash and convert to base64 (electron-builder format)
+ $bytes = [System.IO.File]::ReadAllBytes($exeFile.FullName)
+ $sha512 = [System.Security.Cryptography.SHA512]::Create()
+ $hashBytes = $sha512.ComputeHash($bytes)
+ $hash = [System.Convert]::ToBase64String($hashBytes)
+ $size = $exeFile.Length
+
+ Write-Host " Hash: $hash"
+ Write-Host " Size: $size"
+ }
+
+ # For electron-builder, latest.yml has a single file entry for the installer
+ # Update the sha512 and size for the primary exe (first one, typically the installer)
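+ # For reference, latest.yml (the electron-updater manifest) roughly looks like
+ # this (illustrative placeholders, not real values):
+ #   version: 2.7.2
+ #   files:
+ #     - url: Auto-Claude-2.7.2-win32-x64.exe
+ #       sha512: <base64-encoded SHA512>
+ #       size: 123456789
+ #   path: Auto-Claude-2.7.2-win32-x64.exe
+ #   sha512: <base64-encoded SHA512>
+ # The -replace calls below rewrite every sha512/size occurrence; for a
+ # single-installer build the files entry and the top-level fields describe the
+ # same exe, so writing the same value everywhere is correct.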
+ $primaryExe = $exeFiles | Select-Object -First 1
+ $bytes = [System.IO.File]::ReadAllBytes($primaryExe.FullName)
+ $sha512 = [System.Security.Cryptography.SHA512]::Create()
+ $hashBytes = $sha512.ComputeHash($bytes)
+ $hash = [System.Convert]::ToBase64String($hashBytes)
+ $size = $primaryExe.Length
+
+ # Update sha512 hash (base64 pattern: alphanumeric, +, /, =)
+ $content = $content -replace 'sha512: [A-Za-z0-9+/=]+', "sha512: $hash"
+ # Update size
+ $content = $content -replace 'size: \d+', "size: $size"
+
+ if ($content -eq $originalContent) {
+ Write-Host "::error::Checksum replacement failed - content unchanged. Check if latest.yml format has changed."
+ exit 1
+ }
+
+ Set-Content -Path $ymlFile -Value $content -NoNewline
+ Write-Host "✅ Updated $ymlFile with new base64 hash and size for $($primaryExe.Name)"
+
+ - name: Skip signing notice
+ if: env.AZURE_CLIENT_ID == ''
+ run: echo "::warning::Windows signing skipped - AZURE_CLIENT_ID not configured. The .exe will be unsigned."
- name: Upload artifacts
uses: actions/upload-artifact@v4
@@ -224,6 +390,8 @@ jobs:
name: windows-builds
path: |
apps/frontend/dist/*.exe
+ apps/frontend/dist/*.yml
+ apps/frontend/dist/*.blockmap
build-linux:
runs-on: ubuntu-latest
@@ -261,21 +429,36 @@ jobs:
flatpak install -y --user flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08
flatpak install -y --user flathub org.electronjs.Electron2.BaseApp//25.08
+ - name: Cache pip wheel cache
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/pip
+ key: pip-wheel-${{ runner.os }}-x64-${{ hashFiles('apps/backend/requirements.txt') }}
+ restore-keys: |
+ pip-wheel-${{ runner.os }}-x64-
+
- name: Cache bundled Python
uses: actions/cache@v4
with:
path: apps/frontend/python-runtime
- key: python-bundle-${{ runner.os }}-x64-3.12.8
+ key: python-bundle-${{ runner.os }}-x64-3.12.8-${{ hashFiles('apps/backend/requirements.txt') }}
restore-keys: |
- python-bundle-${{ runner.os }}-x64-
+ python-bundle-${{ runner.os }}-x64-3.12.8-
- name: Build application
run: cd apps/frontend && npm run build
+ env:
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Package Linux
run: cd apps/frontend && npm run package:linux
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ SENTRY_DSN: ${{ secrets.SENTRY_DSN }}
+ SENTRY_TRACES_SAMPLE_RATE: ${{ secrets.SENTRY_TRACES_SAMPLE_RATE }}
+ SENTRY_PROFILES_SAMPLE_RATE: ${{ secrets.SENTRY_PROFILES_SAMPLE_RATE }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@@ -285,6 +468,8 @@ jobs:
apps/frontend/dist/*.AppImage
apps/frontend/dist/*.deb
apps/frontend/dist/*.flatpak
+ apps/frontend/dist/*.yml
+ apps/frontend/dist/*.blockmap
create-release:
needs: [build-macos-intel, build-macos-arm64, build-windows, build-linux]
@@ -304,16 +489,30 @@ jobs:
- name: Flatten and validate artifacts
run: |
mkdir -p release-assets
- find dist -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) -exec cp {} release-assets/ \;
+ find dist -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" -o -name "*.yml" -o -name "*.blockmap" \) -exec cp {} release-assets/ \;
- # Validate that at least one artifact was copied
- artifact_count=$(find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) | wc -l)
- if [ "$artifact_count" -eq 0 ]; then
- echo "::error::No build artifacts found! Expected .dmg, .zip, .exe, .AppImage, .deb, or .flatpak files."
+ # Validate that installer files exist (not just manifests)
+ installer_count=$(find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) | wc -l)
+ if [ "$installer_count" -eq 0 ]; then
+ echo "::error::No installer artifacts found! Expected .dmg, .zip, .exe, .AppImage, .deb, or .flatpak files."
exit 1
fi
- echo "Found $artifact_count artifact(s):"
+ echo "Found $installer_count installer(s):"
+ find release-assets -type f \( -name "*.dmg" -o -name "*.zip" -o -name "*.exe" -o -name "*.AppImage" -o -name "*.deb" -o -name "*.flatpak" \) -exec basename {} \;
+
+ # Validate that electron-updater manifest files are present (required for auto-updates)
+ yml_count=$(find release-assets -type f -name "*.yml" | wc -l)
+ if [ "$yml_count" -eq 0 ]; then
+ echo "::error::No update manifest (.yml) files found! Auto-update architecture detection will not work."
+ exit 1
+ fi
+
+ echo "Found $yml_count manifest file(s):"
+ find release-assets -type f -name "*.yml" -exec basename {} \;
+
+ echo ""
+ echo "All release assets:"
ls -la release-assets/
- name: Generate checksums
@@ -473,23 +672,78 @@ jobs:
cat release-assets/checksums.sha256 >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
- - name: Generate changelog
- if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }}
+ - name: Extract changelog from CHANGELOG.md
+ if: ${{ github.event_name == 'push' }}
id: changelog
- uses: release-drafter/release-drafter@v6
- with:
- config-name: release-drafter.yml
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ # Extract version from tag (v2.7.2 -> 2.7.2)
+ VERSION=${GITHUB_REF_NAME#v}
+ CHANGELOG_FILE="CHANGELOG.md"
+
+ echo "📋 Extracting release notes for version $VERSION from CHANGELOG.md..."
+
+ if [ ! -f "$CHANGELOG_FILE" ]; then
+ echo "::warning::CHANGELOG.md not found, using minimal release notes"
+ echo "body=Release v$VERSION" >> $GITHUB_OUTPUT
+ exit 0
+ fi
+
+ # Extract changelog section for this version
+ # Looks for "## X.Y.Z" header and captures until next "## " or "---"
+ CHANGELOG_CONTENT=$(awk -v ver="$VERSION" '
+ BEGIN { found=0; content="" }
+ /^## / {
+ if (found) exit
+ # Match version at start of header (e.g., "## 2.7.3 -" or "## 2.7.3")
+ if ($2 == ver || $2 ~ "^"ver"[[:space:]]*-") {
+ found=1
+ next
+ }
+ }
+ /^---$/ { if (found) exit }
+ found { content = content $0 "\n" }
+ END {
+ if (!found) {
+ print "NOT_FOUND"
+ exit 0
+ }
+ # Trim leading/trailing whitespace
+ gsub(/^[[:space:]]+|[[:space:]]+$/, "", content)
+ print content
+ }
+ ' "$CHANGELOG_FILE")
+
+ if [ "$CHANGELOG_CONTENT" = "NOT_FOUND" ] || [ -z "$CHANGELOG_CONTENT" ]; then
+ echo "::warning::Version $VERSION not found in CHANGELOG.md, using minimal release notes"
+ CHANGELOG_CONTENT="Release v$VERSION
+
+See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details."
+ fi
+
+ echo "✅ Extracted changelog content"
+
+ # Save to file first (more reliable for multiline)
+ echo "$CHANGELOG_CONTENT" > changelog-body.md
+
+ # Use file-based output for multiline content
+ {
+ echo "body<> $GITHUB_OUTPUT
- name: Create Release
- if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }}
+ if: ${{ github.event_name == 'push' }}
uses: softprops/action-gh-release@v2
with:
body: |
${{ steps.changelog.outputs.body }}
+ ---
+
${{ steps.virustotal.outputs.vt_results }}
+
+ **Full Changelog**: https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md
files: release-assets/*
draft: false
prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
@@ -500,7 +754,8 @@ jobs:
update-readme:
needs: [create-release]
runs-on: ubuntu-latest
- if: ${{ github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.dry_run != true) }}
+ # Only update README on actual releases (tag push), not dry runs
+ if: ${{ github.event_name == 'push' }}
permissions:
contents: write
steps:
diff --git a/.gitignore b/.gitignore
index 7f53e4c59a..6d2e458532 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ Desktop.ini
.env
.env.*
!.env.example
+/config.json
*.pem
*.key
*.crt
@@ -163,3 +164,7 @@ _bmad-output/
.claude/
/docs
OPUS_ANALYSIS_AND_IDEAS.md
+/.github/agents
+
+# Auto Claude generated files
+.security-key
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f67b77c813..0f996bccc2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,6 @@
repos:
# Version sync - propagate root package.json version to all files
+ # NOTE: Skip in worktrees - version sync modifies root files which don't exist in worktree
- repo: local
hooks:
- id: version-sync
@@ -8,6 +9,12 @@ repos:
args:
- -c
- |
+ # Skip in worktrees - .git is a file pointing to main repo, not a directory
+ # Version sync modifies root-level files that may not exist in worktree context
+ if [ -f ".git" ]; then
+ echo "Skipping version-sync in worktree (root files not accessible)"
+ exit 0
+ fi
VERSION=$(node -p "require('./package.json').version")
if [ -n "$VERSION" ]; then
@@ -81,6 +88,7 @@ repos:
# Python tests (apps/backend/) - skip slow/integration tests for pre-commit speed
# Tests to skip: graphiti (external deps), merge_file_tracker/service_orchestrator/worktree/workspace (Windows path/git issues)
+ # NOTE: Skip this hook in worktrees (where .git is a file, not a directory)
- repo: local
hooks:
- id: pytest
@@ -89,6 +97,12 @@ repos:
args:
- -c
- |
+ # Skip in worktrees - .git is a file pointing to main repo, not a directory
+ # This prevents path resolution issues with ../../tests/ in worktree context
+ if [ -f ".git" ]; then
+ echo "Skipping pytest in worktree (path resolution would fail)"
+ exit 0
+ fi
cd apps/backend
if [ -f ".venv/bin/pytest" ]; then
PYTEST_CMD=".venv/bin/pytest"
@@ -113,18 +127,37 @@ repos:
pass_filenames: false
# Frontend linting (apps/frontend/)
+ # NOTE: These hooks check for worktree context to avoid npm/node_modules issues
- repo: local
hooks:
- id: eslint
name: ESLint
- entry: bash -c 'cd apps/frontend && npm run lint'
+ entry: bash
+ args:
+ - -c
+ - |
+ # Skip in worktrees if node_modules doesn't exist (dependencies not installed)
+ if [ -f ".git" ] && [ ! -d "apps/frontend/node_modules" ]; then
+ echo "Skipping ESLint in worktree (node_modules not found)"
+ exit 0
+ fi
+ cd apps/frontend && npm run lint
language: system
files: ^apps/frontend/.*\.(ts|tsx|js|jsx)$
pass_filenames: false
- id: typecheck
name: TypeScript Check
- entry: bash -c 'cd apps/frontend && npm run typecheck'
+ entry: bash
+ args:
+ - -c
+ - |
+ # Skip in worktrees if node_modules doesn't exist (dependencies not installed)
+ if [ -f ".git" ] && [ ! -d "apps/frontend/node_modules" ]; then
+ echo "Skipping TypeScript check in worktree (node_modules not found)"
+ exit 0
+ fi
+ cd apps/frontend && npm run typecheck
language: system
files: ^apps/frontend/.*\.(ts|tsx)$
pass_filenames: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2fb1a26e82..22c43eb8da 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,283 @@
+## 2.7.2 - Stability & Performance Enhancements
+
+### ✨ New Features
+
+- Added refresh button to Kanban board for manually reloading tasks
+
+- Terminal dropdown with built-in and external options in task review
+
+- Centralized CLI tool path management with customizable settings
+
+- Files tab in task details panel for better file organization
+
+- Enhanced PR review page with filtering capabilities
+
+- GitLab integration support
+
+- Automated PR review with follow-up support and structured outputs
+
+- UI scale feature with 75-200% range for accessibility
+
+- Python 3.12 bundled with packaged Electron app
+
+- OpenRouter support as LLM/embedding provider
+
+- Internationalization (i18n) system for multi-language support
+
+- Flatpak packaging support for Linux
+
+- Path-aware AI merge resolution with device code streaming
+
+### 🛠️ Improvements
+
+- Improved terminal experience with persistent state when switching projects
+
+- Enhanced PR review with structured outputs and fork support
+
+- Better UX for display and scaling changes
+
+- Convert synchronous I/O to async operations in worktree handlers
+
+- Enhanced logs for commit linting stage
+
+- Remove top navigation bars for cleaner UI
+
+- Enhanced PR detail area visual design
+
+- Improved CLI tool detection with more language support
+
+- Added iOS/Swift project detection
+
+- Optimize performance by removing projectTabs from useEffect dependencies
+
+- Improved Python detection and version validation for compatibility
+
+### 🐛 Bug Fixes
+
+- Fixed CI Python setup and PR status gate checks
+
+- Fixed cross-platform CLI path detection and clearing in settings
+
+- Preserve original task description after spec creation
+
+- Fixed learning loop to retrieve patterns and gotchas from memory
+
+- Resolved frontend lag and updated dependencies
+
+- Fixed Content-Security-Policy to allow external HTTPS images
+
+- Fixed PR review isolation by using temporary worktree
+
+- Fixed Homebrew Python detection to prefer versioned Python over system python3
+
+- Added support for Bun 1.2.0+ lock file format detection
+
+- Fixed infinite re-render loop in task selection
+
+- Fixed infinite loop in task detail merge preview loading
+
+- Resolved Windows EINVAL error when opening worktree in VS Code
+
+- Fixed fallback to prevent tasks stuck in ai_review status
+
+- Fixed SDK permissions to include spec_dir
+
+- Added --base-branch argument support to spec_runner
+
+- Allow Windows to run CC PR Reviewer
+
+- Fixed model selection to respect task_metadata.json
+
+- Improved GitHub PR review by passing repo parameter explicitly
+
+- Fixed electron-log imports with .js extension
+
+- Fixed Swift detection order in project analyzer
+
+- Prevent TaskEditDialog from unmounting when opened
+
+- Fixed subprocess handling for Python paths with spaces
+
+- Fixed file system race conditions and unused variables in security scanning
+
+- Resolved Python detection and backend packaging issues
+
+- Fixed version-specific links in README and pre-commit hooks
+
+- Fixed task status persistence reverting on refresh
+
+- Proper semver comparison for pre-release versions
+
+- Use virtual environment Python for all services to fix dotenv errors
+
+- Fixed explicit Windows System32 tar path for builds
+
+- Added augmented PATH environment to all GitHub CLI calls
+
+- Use PowerShell for tar extraction on Windows
+
+- Added --force-local flag to tar on Windows
+
+- Stop tracking spec files in git
+
+- Fixed GitHub API calls with explicit GET method for comment fetches
+
+- Support archiving tasks across all worktree locations
+
+- Validated backend source path before using it
+
+- Resolved spawn Python ENOENT error on Linux
+
+- Fixed CodeQL alerts for uncontrolled command line
+
+- Resolved GitHub follow-up review API issues
+
+- Fixed relative path normalization to POSIX format
+
+- Accepted bug_fix workflow_type alias during planning
+
+- Added global spec numbering lock to prevent collisions
+
+- Fixed ideation status sync
+
+- Stopped running process when task status changes away from in_progress
+
+- Removed legacy path from auto-claude source detection
+
+- Resolved Python environment race condition
+
+---
+
+## What's Changed
+
+- fix(ci): add Python setup to beta-release and fix PR status gate checks (#565) by @Andy in c2148bb9
+- fix: detect and clear cross-platform CLI paths in settings (#535) by @Andy in 29e45505
+- fix(ui): preserve original task description after spec creation (#536) by @Andy in 7990dcb4
+- fix(memory): fix learning loop to retrieve patterns and gotchas (#530) by @Andy in f58c2578
+- fix: resolve frontend lag and update dependencies (#526) by @Andy in 30f7951a
+- feat(kanban): add refresh button to manually reload tasks (#548) by @Adryan Serage in 252242f9
+- fix(csp): allow external HTTPS images in Content-Security-Policy (#549) by @Michael Ludlow in 3db02c5d
+- fix(pr-review): use temporary worktree for PR review isolation (#532) by @Andy in 344ec65e
+- fix: prefer versioned Homebrew Python over system python3 (#494) by @Navid in 8d58dd6f
+- fix(detection): support bun.lock text format for Bun 1.2.0+ (#525) by @Andy in 4da8cd66
+- chore: bump version to 2.7.2-beta.12 (#460) by @Andy in 8e5c11ac
+- Fix/windows issues (#471) by @Andy in 72106109
+- fix(ci): add Rust toolchain for Intel Mac builds (#459) by @Andy in 52a4fcc6
+- fix: create spec.md during roadmap-to-task conversion (#446) by @Mulaveesala Pranaveswar in fb6b7fc6
+- fix(pr-review): treat LOW-only findings as ready to merge (#455) by @Andy in 0f9c5b84
+- Fix/2.7.2 beta12 (#424) by @Andy in 5d8ede23
+- feat: remove top bars (#386) by @Vinícius Santos in da31b687
+- fix: prevent infinite re-render loop in task selection useEffect (#442) by @Abe Diaz in 2effa535
+- fix: accept Python 3.12+ in install-backend.js (#443) by @Abe Diaz in c15bb311
+- fix: infinite loop in useTaskDetail merge preview loading (#444) by @Abe Diaz in 203a970a
+- fix(windows): resolve EINVAL error when opening worktree in VS Code (#434) by @Vinícius Santos in 3c0708b7
+- feat(frontend): Add Files tab to task details panel (#430) by @Mitsu in 666794b5
+- refactor: remove deprecated TaskDetailPanel component (#432) by @Mitsu in ac8dfcac
+- fix(ui): add fallback to prevent tasks stuck in ai_review status (#397) by @Michael Ludlow in 798ca79d
+- feat: Enhance the look of the PR Detail area (#427) by @Alex in bdb01549
+- ci: remove conventional commits PR title validation workflow by @AndyMik90 in 515b73b5
+- fix(client): add spec_dir to SDK permissions (#429) by @Mitsu in 88c76059
+- fix(spec_runner): add --base-branch argument support (#428) by @Mitsu in 62a75515
+- feat: enhance pr review page to include PRs filters (#423) by @Alex in 717fba04
+- feat: add gitlab integration (#254) by @Mitsu in 0a571d3a
+- fix: Allow windows to run CC PR Reviewer (#406) by @Alex in 2f662469
+- fix(model): respect task_metadata.json model selection (#415) by @Andy in e7e6b521
+- feat(build): add Flatpak packaging support for Linux (#404) by @Mitsu in 230de5fc
+- fix(github): pass repo parameter to GHClient for explicit PR resolution (#413) by @Andy in 4bdf7a0c
+- chore(ci): remove redundant CLA GitHub Action workflow by @AndyMik90 in a39ea49d
+- fix(frontend): add .js extension to electron-log/main imports by @AndyMik90 in 9aef0dd0
+- fix: 2.7.2 bug fixes and improvements (#388) by @Andy in 05131217
+- fix(analyzer): move Swift detection before Ruby detection (#401) by @Michael Ludlow in 321c9712
+- fix(ui): prevent TaskEditDialog from unmounting when opened (#395) by @Michael Ludlow in 98b12ed8
+- fix: improve CLI tool detection and add Claude CLI path settings (#393) by @Joe in aaa83131
+- feat(analyzer): add iOS/Swift project detection (#389) by @Michael Ludlow in 68548e33
+- fix(github): improve PR review with structured outputs and fork support (#363) by @Andy in 7751588e
+- fix(ideation): update progress calculation to include just-completed ideation type (#381) by @Illia Filippov in 8b4ce58c
+- Fixes failing spec - "gh CLI Check Handler - should return installed: true when gh CLI is found" (#370) by @Ian in bc220645
+- fix: Memory Status card respects configured embedding provider (#336) (#373) by @Michael Ludlow in db0cbea3
+- fix: fixed version-specific links in readme and pre-commit hook that updates them (#378) by @Ian in 0ca2e3f6
+- docs: add security research documentation (#361) by @Brian in 2d3b7fb4
+- fix/Improving UX for Display/Scaling Changes (#332) by @Kevin Rajan in 9bbdef09
+- fix(perf): remove projectTabs from useEffect deps to fix re-render loop (#362) by @Michael Ludlow in 753dc8bb
+- fix(security): invalidate profile cache when file is created/modified (#355) by @Michael Ludlow in 20f20fa3
+- fix(subprocess): handle Python paths with spaces (#352) by @Michael Ludlow in eabe7c7d
+- fix: Resolve pre-commit hook failures with version sync, pytest path, ruff version, and broken quality-dco workflow (#334) by @Ian in 1fa7a9c7
+- fix(terminal): preserve terminal state when switching projects (#358) by @Andy in 7881b2d1
+- fix(analyzer): add C#/Java/Swift/Kotlin project files to security hash (#351) by @Michael Ludlow in 4e71361b
+- fix: make backend tests pass on Windows (#282) by @Oluwatosin Oyeladun in 4dcc5afa
+- fix(ui): close parent modal when Edit dialog opens (#354) by @Michael Ludlow in e9782db0
+- chore: bump version to 2.7.2-beta.10 by @AndyMik90 in 40d04d7c
+- feat: add terminal dropdown with inbuilt and external options in task review (#347) by @JoshuaRileyDev in fef07c95
+- refactor: remove deprecated code across backend and frontend (#348) by @Mitsu in 9d43abed
+- feat: centralize CLI tool path management (#341) by @HSSAINI Saad in d51f4562
+- refactor(components): remove deprecated TaskDetailPanel re-export (#344) by @Mitsu in 787667e9
+- chore: Refactor/kanban realtime status sync (#249) by @souky-byte in 9734b70b
+- refactor(settings): remove deprecated ProjectSettings modal and hooks (#343) by @Mitsu in fec6b9f3
+- perf: convert synchronous I/O to async operations in worktree handlers (#337) by @JoshuaRileyDev in d3a63b09
+- feat: bump version (#329) by @Alex in 50e3111a
+- fix(ci): remove version bump to fix branch protection conflict (#325) by @Michael Ludlow in 8a80b1d5
+- fix(tasks): sync status to worktree implementation plan to prevent reset (#243) (#323) by @Alex in cb6b2165
+- fix(ci): add auto-updater manifest files and version auto-update (#317) by @Michael Ludlow in 661e47c3
+- fix(project): fix task status persistence reverting on refresh (#246) (#318) by @Michael Ludlow in e80ef79d
+- fix(updater): proper semver comparison for pre-release versions (#313) by @Michael Ludlow in e1b0f743
+- fix(python): use venv Python for all services to fix dotenv errors (#311) by @Alex in 92c6f278
+- chore(ci): cancel in-progress runs (#302) by @Oluwatosin Oyeladun in 1c142273
+- fix(build): use explicit Windows System32 tar path (#308) by @Andy in c0a02a45
+- fix(github): add augmented PATH env to all gh CLI calls by @AndyMik90 in 086429cb
+- fix(build): use PowerShell for tar extraction on Windows by @AndyMik90 in d9fb8f29
+- fix(build): add --force-local flag to tar on Windows (#303) by @Andy in d0b0b3df
+- fix: stop tracking spec files in git (#295) by @Andy in 937a60f8
+- Fix/2.7.2 fixes (#300) by @Andy in 7a51cbd5
+- feat(merge,oauth): add path-aware AI merge resolution and device code streaming (#296) by @Andy in 26beefe3
+- feat: enhance the logs for the commit linting stage (#293) by @Alex in 8416f307
+- fix(github): add explicit GET method to gh api comment fetches (#294) by @Andy in 217249c8
+- fix(frontend): support archiving tasks across all worktree locations (#286) by @Andy in 8bb3df91
+- Potential fix for code scanning alert no. 224: Uncontrolled command line (#285) by @Andy in 5106c6e9
+- fix(frontend): validate backend source path before using it (#287) by @Andy in 3ff61274
+- feat(python): bundle Python 3.12 with packaged Electron app (#284) by @Andy in 7f19c2e1
+- fix: resolve spawn python ENOENT error on Linux by using getAugmentedEnv() (#281) by @Todd W. Bucy in d98e2830
+- fix(ci): add write permissions to beta-release update-version job by @AndyMik90 in 0b874d4b
+- chore(deps): bump @xterm/xterm from 5.5.0 to 6.0.0 in /apps/frontend (#270) by @dependabot[bot] in 50dd1078
+- fix(github): resolve follow-up review API issues by @AndyMik90 in f1cc5a09
+- fix(security): resolve CodeQL file system race conditions and unused variables (#277) by @Andy in b005fa5c
+- fix(ci): use correct electron-builder arch flags (#278) by @Andy in d79f2da4
+- chore(deps): bump jsdom from 26.1.0 to 27.3.0 in /apps/frontend (#268) by @dependabot[bot] in 5ac566e2
+- chore(deps): bump typescript-eslint in /apps/frontend (#269) by @dependabot[bot] in f49d4817
+- fix(ci): use develop branch for dry-run builds in beta-release workflow (#276) by @Andy in 1e1d7d9b
+- fix: accept bug_fix workflow_type alias during planning (#240) by @Daniel Frey in e74a3dff
+- fix(paths): normalize relative paths to posix (#239) by @Daniel Frey in 6ac8250b
+- chore(deps): bump @electron/rebuild in /apps/frontend (#271) by @dependabot[bot] in a2cee694
+- chore(deps): bump vitest from 4.0.15 to 4.0.16 in /apps/frontend (#272) by @dependabot[bot] in d4cad80a
+- feat(github): add automated PR review with follow-up support (#252) by @Andy in 596e9513
+- ci: implement enterprise-grade PR quality gates and security scanning (#266) by @Alex in d42041c5
+- fix: update path resolution for ollama_model_detector.py in memory handlers (#263) by @delyethan in a3f87540
+- feat: add i18n internationalization system (#248) by @Mitsu in f8438112
+- Revert "Feat/Auto Fix Github issues and do extensive AI PR reviews (#250)" (#251) by @Andy in 5e8c5308
+- Feat/Auto Fix Github issues and do extensive AI PR reviews (#250) by @Andy in 348de6df
+- fix: resolve Python detection and backend packaging issues (#241) by @HSSAINI Saad in 0f7d6e05
+- fix: add future annotations import to discovery.py (#229) by @Joris Slagter in 5ccdb6ab
+- Fix/ideation status sync (#212) by @souky-byte in 6ec8549f
+- fix(core): add global spec numbering lock to prevent collisions (#209) by @Andy in 53527293
+- feat: Add OpenRouter as LLM/embedding provider (#162) by @Fernando Possebon in 02bef954
+- fix: Add Python 3.10+ version validation and GitHub Actions Python setup (#180 #167) (#208) by @Fernando Possebon in f168bdc3
+- fix(ci): correct welcome workflow PR message (#206) by @Andy in e3eec68a
+- Feat/beta release (#193) by @Andy in 407a0bee
+- feat/beta-release (#190) by @Andy in 8f766ad1
+- fix/PRs from old main setup to apps structure (#185) by @Andy in ced2ad47
+- fix: hide status badge when execution phase badge is showing (#154) by @Andy in 05f5d303
+- feat: Add UI scale feature with 75-200% range (#125) by @Enes Cingöz in 6951251b
+- fix(task): stop running process when task status changes away from in_progress by @AndyMik90 in 30e7536b
+- Fix/linear 400 error by @Andy in 220faf0f
+- fix: remove legacy path from auto-claude source detection (#148) by @Joris Slagter in f96c6301
+- fix: resolve Python environment race condition (#142) by @Joris Slagter in ebd8340d
+- Feat: Ollama download progress tracking with new apps structure (#141) by @rayBlock in df779530
+- Feature/apps restructure v2.7.2 (#138) by @Andy in 0adaddac
+- docs: Add Git Flow branching strategy to CONTRIBUTING.md by @AndyMik90 in 91f7051d
+
+## Thanks to all contributors
+
+@Andy, @Adryan Serage, @Michael Ludlow, @Navid, @Mulaveesala Pranaveswar, @Vinícius Santos, @Abe Diaz, @Mitsu, @Alex, @AndyMik90, @Joe, @Illia Filippov, @Ian, @Brian, @Kevin Rajan, @Oluwatosin Oyeladun, @JoshuaRileyDev, @HSSAINI Saad, @souky-byte, @Todd W. Bucy, @dependabot[bot], @Daniel Frey, @delyethan, @Joris Slagter, @Fernando Possebon, @Enes Cingöz, @rayBlock
+
## 2.7.1 - Build Pipeline Enhancements
### 🛠️ Improvements
diff --git a/INVESTIGATION.md b/INVESTIGATION.md
new file mode 100644
index 0000000000..2daae34b7b
--- /dev/null
+++ b/INVESTIGATION.md
@@ -0,0 +1,318 @@
+# Root Cause Investigation: Task Workflow Halts After Planning Stage
+
+## Investigation Summary
+
+After adding comprehensive logging to the task loading and plan update pipeline, I've analyzed the data flow from backend to frontend to identify why subtasks fail to display after spec completion.
+
+## Data Flow Analysis
+
+### Current Architecture
+
+```
+Backend (Python)
+ ↓
+Creates implementation_plan.json
+ ↓
+Emits IPC event: 'task:progress' with plan data
+ ↓
+Frontend (Electron Renderer)
+ ↓
+useIpc.ts: onTaskProgress handler (batched)
+ ↓
+task-store.ts: updateTaskFromPlan(taskId, plan)
+ ↓
+Creates subtasks from plan.phases.flatMap(phase => phase.subtasks)
+ ↓
+UI: TaskSubtasks.tsx renders subtasks
+```
+
+### Critical Code Paths
+
+**1. Plan Update Handler** (`apps/frontend/src/renderer/hooks/useIpc.ts:131-135`)
+```typescript
+window.electronAPI.onTaskProgress(
+ (taskId: string, plan: ImplementationPlan) => {
+ queueUpdate(taskId, { plan });
+ }
+);
+```
+
+**2. Subtask Creation** (`apps/frontend/src/renderer/stores/task-store.ts:124-133`)
+```typescript
+const subtasks: Subtask[] = plan.phases.flatMap((phase) =>
+ phase.subtasks.map((subtask) => ({
+ id: subtask.id,
+ title: subtask.description,
+ description: subtask.description,
+ status: subtask.status,
+ files: [],
+ verification: subtask.verification as Subtask['verification']
+ }))
+);
+```
+
+**3. Initial Task Loading** (`apps/frontend/src/main/project-store.ts:461-470`)
+```typescript
+const subtasks = plan?.phases?.flatMap((phase) => {
+ const items = phase.subtasks || (phase as { chunks?: PlanSubtask[] }).chunks || [];
+ return items.map((subtask) => ({
+ id: subtask.id,
+ title: subtask.description,
+ description: subtask.description,
+ status: subtask.status,
+ files: []
+ }));
+}) || [];
+```
+
+## Root Cause Identification
+
+### Primary Root Cause: Early Plan Update Event with Empty Phases
+
+**What's Happening:**
+
+1. **Backend creates `implementation_plan.json` in stages:**
+ - First writes the file with minimal structure: `{ "feature": "...", "phases": [] }`
+ - Then adds phases and subtasks incrementally
+ - Emits IPC event each time the plan is updated
+
+2. **Frontend receives the FIRST plan update event:**
+ - Plan has `feature` and basic metadata
+ - **But `phases` array is EMPTY: `[]`**
+ - `updateTaskFromPlan` is called with this incomplete plan
+ - Subtasks are created as empty array: `plan.phases.flatMap(...)` → `[]`
+
+3. **Later plan updates with full subtask data are ignored:**
+ - When backend writes the complete plan with subtasks
+ - Another IPC event is emitted
+ - But due to race conditions or event handling issues, this update doesn't reach the frontend
+ - Or it does reach but the task UI doesn't refresh
+
+**Evidence from Code:**
+
+Looking at `updateTaskFromPlan` (task-store.ts:106-190):
+- Line 108-114: Logs show `phases: plan.phases?.length || 0`
+- Line 112: If plan has 0 phases, `totalSubtasks` will be 0
+- Line 124-133: `plan.phases.flatMap(...)` on empty array creates `subtasks = []`
+- **No validation to check if plan is complete before updating state**
+
+**Why "!" Indicators Appear:**
+
+The "!" indicators likely come from the UI attempting to render subtasks when:
+- Subtask count shows as 18 (from later plan update metadata)
+- But `task.subtasks` array is actually empty `[]` (from early plan update)
+- This mismatch causes the UI to show warning indicators
+
+### Secondary Contributing Factors
+
+**A. No Plan Validation Before State Update**
+
+Current code in `updateTaskFromPlan` immediately creates subtasks from whatever plan data it receives:
+```typescript
+const subtasks: Subtask[] = plan.phases.flatMap((phase) =>
+ phase.subtasks.map((subtask) => ({ ... }))
+);
+```
+
+**Problem:** No check if plan is "ready" or "complete" before updating state.
+
+**B. Missing Reload Trigger After Spec Completion**
+
+When spec creation completes and the full plan is written:
+- The IPC event might not fire again
+- Or the event fires but the batching mechanism drops it
+- Frontend state remains stuck with empty subtasks
+
+**C. Race Condition in Batch Update Queue**
+
+In `useIpc.ts:92-112`, the batching mechanism queues updates:
+```typescript
+function queueUpdate(taskId: string, update: BatchedUpdate): void {
+ const existing = batchQueue.get(taskId) || {};
+ batchQueue.set(taskId, { ...existing, ...update });
+}
+```
+
+**Problem:** If two plan updates arrive within 16ms:
+- First update has empty phases: `{ plan: { phases: [] } }`
+- Second update has full phases: `{ plan: { phases: [...18 subtasks...] } }`
+- Second update **overwrites** first in the queue
+- But if order gets reversed, empty plan overwrites full plan
+
+## Log Evidence to Look For
+
+To confirm this root cause, check console logs for:
+
+### 1. Plan Loading Sequence
+```
+[updateTaskFromPlan] called with plan:
+ taskId: "xxx"
+ feature: "..."
+ phases: 0 ← SMOKING GUN: phases array is empty
+ totalSubtasks: 0 ← No subtasks
+```
+
+If you see `phases: 0` and no later update ever arrives with `phases: 3` (or more), the early empty plan is stuck in state.
+
+### 2. Multiple Plan Updates
+```
+[updateTaskFromPlan] called with plan:
+ phases: 0
+ totalSubtasks: 0
+
+[updateTaskFromPlan] called with plan: ← This might never appear
+ phases: 3
+ totalSubtasks: 18
+```
+
+If second log never appears, the plan update event isn't firing after spec completion.
+
+### 3. Project Store Loading
+```
+[ProjectStore] Loading implementation_plan.json for spec: xxx
+[ProjectStore] Loaded plan for xxx:
+ phaseCount: 0 ← Empty plan loaded from disk
+ subtaskCount: 0
+```
+
+If plan file on disk has empty phases, the issue is in backend plan writing.
+
+### 4. Plan File Utils
+```
+[plan-file-utils] Reading implementation_plan.json to update status
+[plan-file-utils] Successfully persisted status ← Plan exists but might be incomplete
+```
+
+Check if plan file reads/writes are happening during spec creation.
+
+## Proposed Fix Approach
+
+### Fix 1: Add Plan Completeness Validation (Immediate Fix)
+
+**File:** `apps/frontend/src/renderer/stores/task-store.ts`
+
+**Change:** Only update subtasks if plan has valid phases and subtasks:
+
+```typescript
+updateTaskFromPlan: (taskId, plan) =>
+ set((state) => {
+ console.log('[updateTaskFromPlan] called with plan:', { ... });
+
+ const index = findTaskIndex(state.tasks, taskId);
+ if (index === -1) {
+ console.log('[updateTaskFromPlan] Task not found:', taskId);
+ return state;
+ }
+
+ // VALIDATION: Don't update if plan is incomplete
+ if (!plan.phases || plan.phases.length === 0) {
+ console.warn('[updateTaskFromPlan] Plan has no phases, skipping update:', taskId);
+ return state; // Keep existing state, don't overwrite with empty data
+ }
+
+ const totalSubtasks = plan.phases.reduce((acc, p) => acc + (p.subtasks?.length || 0), 0);
+ if (totalSubtasks === 0) {
+ console.warn('[updateTaskFromPlan] Plan has no subtasks, skipping update:', taskId);
+ return state; // Keep existing state
+ }
+
+ // ... rest of existing code to create subtasks ...
+ })
+```
+
+### Fix 2: Trigger Reload After Spec Completion (Comprehensive Fix)
+
+**File:** `apps/frontend/src/renderer/hooks/useIpc.ts`
+
+**Change:** Add explicit "spec completed" event handler that reloads the task:
+
+```typescript
+// Add new IPC event listener
+const cleanupSpecComplete = window.electronAPI.onSpecComplete(
+ async (taskId: string) => {
+ console.log('[IPC] Spec completed for task:', taskId);
+ // Force reload the task from disk to get the complete plan
+ const task = useTaskStore.getState().tasks.find(t => t.id === taskId);
+ if (task) {
+ // Reload plan from file
+ const result = await window.electronAPI.getTaskPlan(task.projectId, taskId);
+ if (result.success && result.data) {
+ updateTaskFromPlan(taskId, result.data);
+ }
+ }
+ }
+);
+```
+
+### Fix 3: Prevent Plan Overwrite in Batch Queue (Race Condition Fix)
+
+**File:** `apps/frontend/src/renderer/hooks/useIpc.ts`
+
+**Change:** Don't overwrite plan if incoming plan has fewer subtasks than existing:
+
+```typescript
+function queueUpdate(taskId: string, update: BatchedUpdate): void {
+ const existing = batchQueue.get(taskId) || {};
+
+ // For plan updates, only accept if it has MORE data than existing
+ let mergedPlan = existing.plan;
+ if (update.plan) {
+ const existingSubtasks = existing.plan?.phases?.flatMap(p => p.subtasks || []).length || 0;
+ const newSubtasks = update.plan.phases?.flatMap(p => p.subtasks || []).length || 0;
+
+ if (newSubtasks >= existingSubtasks) {
+ mergedPlan = update.plan; // Accept new plan
+ } else {
+ console.warn('[IPC Batch] Rejecting plan update with fewer subtasks:',
+ { taskId, existing: existingSubtasks, new: newSubtasks });
+ // Keep existing plan, don't overwrite with less complete data
+ }
+ }
+
+ // Store the merged result so a less complete plan never overwrites a fuller one
+ batchQueue.set(taskId, { ...existing, ...update, plan: mergedPlan });
+
+ // ... rest of existing code ...
+}
+```
+
+## Testing the Fix
+
+### Manual Verification Steps
+
+1. **Create a new task** and move it to "In Progress"
+2. **Watch the console logs** for:
+ ```
+ [updateTaskFromPlan] called with plan: { phases: 0, totalSubtasks: 0 }
+ ```
+3. **Wait for spec to complete** (planning phase finishes)
+4. **Check console logs** for:
+ ```
+ [updateTaskFromPlan] called with plan: { phases: 3, totalSubtasks: 18 }
+ ```
+5. **Expand subtask list** in task card
+6. **Verify:** Subtasks display with full details, no "!" indicators
+
+### Expected Outcome After Fix
+
+- ✅ Empty/incomplete plan updates are ignored
+- ✅ Only complete plans with phases and subtasks update the UI
+- ✅ Subtasks display with id, description, and status
+- ✅ No "!" warning indicators
+- ✅ Subtask count shows "0/18 completed" (not "0/0")
+- ✅ Plan pulsing animation stops when spec completes
+- ✅ Resume functionality works without infinite loop
+
+## Next Steps
+
+1. ✅ **This Investigation** - Root cause identified (COMPLETE)
+2. 🔄 **Subtask 2-1** - Implement Fix 1 (validation in updateTaskFromPlan)
+3. 🔄 **Subtask 2-2** - Add data validation before subtask state updates
+4. 🔄 **Subtask 2-3** - Fix pulsing animation condition
+5. 🔄 **Subtask 2-4** - Fix resume logic to reload plan if subtasks missing
+6. 🔄 **Phase 3** - Add comprehensive tests to prevent regressions (a starter test sketch follows below)
+
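+As a starting point for Phase 3, here is a minimal sketch of a regression test for Fix 1. It assumes a vitest setup; the import path and the seeded task/plan shapes are trimmed for illustration (hence the casts) and are not the real `Task`/`ImplementationPlan` types.
+
+```typescript
+import { describe, it, expect } from 'vitest';
+import { useTaskStore } from '../stores/task-store'; // adjust path to the real store location
+
+describe('updateTaskFromPlan validation (Fix 1)', () => {
+  it('keeps existing subtasks when a plan with no phases arrives', () => {
+    // Seed one task that already has a subtask (shape trimmed for illustration)
+    useTaskStore.setState({
+      tasks: [
+        {
+          id: 't1',
+          subtasks: [
+            { id: 's1', title: 'existing', description: 'existing', status: 'pending', files: [] }
+          ]
+        } as never
+      ]
+    });
+
+    // Simulate the early IPC event that carries an empty plan
+    useTaskStore.getState().updateTaskFromPlan('t1', { feature: 'demo', phases: [] } as never);
+
+    // With the validation in place, the empty plan must not wipe the existing subtasks
+    expect(useTaskStore.getState().tasks[0].subtasks).toHaveLength(1);
+  });
+});
+```
+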
+## Conclusion
+
+**Root Cause:** Frontend receives and accepts incomplete plan data (empty `phases` array) during the spec creation process, before subtasks are written. This overwrites any existing subtask data and leaves the UI in a stuck state with no subtasks to display.
+
+**Fix Priority:** Implement Fix 1 (validation) immediately to prevent incomplete plans from updating state. This is a minimal, low-risk change that will resolve the core issue.
+
+**Long-term Solution:** Add explicit event handling for spec completion (Fix 2) and improve batch queue logic (Fix 3) to make the system more robust against race conditions and out-of-order updates.
diff --git a/README.md b/README.md
index d22c5216a2..b5c6f60cef 100644
--- a/README.md
+++ b/README.md
@@ -4,11 +4,9 @@

-
-[](https://github.com/AndyMik90/Auto-Claude/releases/tag/v2.7.2)
-
[](./agpl-3.0.txt)
[](https://discord.gg/KCXaPBr4Dj)
+[](https://www.youtube.com/@AndreMikalsen)
[](https://github.com/AndyMik90/Auto-Claude/actions)
---
@@ -24,11 +22,11 @@
| Platform | Download |
|----------|----------|
-| **Windows** | [Auto-Claude-2.7.1-win32-x64.exe](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-win32-x64.exe) |
-| **macOS (Apple Silicon)** | [Auto-Claude-2.7.1-darwin-arm64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-darwin-arm64.dmg) |
-| **macOS (Intel)** | [Auto-Claude-2.7.1-darwin-x64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-darwin-x64.dmg) |
-| **Linux** | [Auto-Claude-2.7.1-linux-x86_64.AppImage](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-linux-x86_64.AppImage) |
-| **Linux (Debian)** | [Auto-Claude-2.7.1-linux-amd64.deb](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.1/Auto-Claude-2.7.1-linux-amd64.deb) |
+| **Windows** | [Auto-Claude-2.7.2-win32-x64.exe](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-win32-x64.exe) |
+| **macOS (Apple Silicon)** | [Auto-Claude-2.7.2-darwin-arm64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-darwin-arm64.dmg) |
+| **macOS (Intel)** | [Auto-Claude-2.7.2-darwin-x64.dmg](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-darwin-x64.dmg) |
+| **Linux** | [Auto-Claude-2.7.2-linux-x86_64.AppImage](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-linux-x86_64.AppImage) |
+| **Linux (Debian)** | [Auto-Claude-2.7.2-linux-amd64.deb](https://github.com/AndyMik90/Auto-Claude/releases/download/v2.7.2/Auto-Claude-2.7.2-linux-amd64.deb) |
### Beta Release
@@ -59,7 +57,6 @@
- **Claude Pro/Max subscription** - [Get one here](https://claude.ai/upgrade)
- **Claude Code CLI** - `npm install -g @anthropic-ai/claude-code`
- **Git repository** - Your project must be initialized as a git repo
-- **Python 3.12+** - Required for the backend and Memory Layer
---
@@ -148,113 +145,11 @@ See [guides/CLI-USAGE.md](guides/CLI-USAGE.md) for complete CLI documentation.
---
-## Configuration
+## Development
-Create `apps/backend/.env` from the example:
+Want to build from source or contribute? See [CONTRIBUTING.md](CONTRIBUTING.md) for complete development setup instructions.
-```bash
-cp apps/backend/.env.example apps/backend/.env
-```
-
-| Variable | Required | Description |
-|----------|----------|-------------|
-| `CLAUDE_CODE_OAUTH_TOKEN` | Yes | OAuth token from `claude setup-token` |
-| `GRAPHITI_ENABLED` | No | Enable Memory Layer for cross-session context |
-| `AUTO_BUILD_MODEL` | No | Override the default Claude model |
-| `GITLAB_TOKEN` | No | GitLab Personal Access Token for GitLab integration |
-| `GITLAB_INSTANCE_URL` | No | GitLab instance URL (defaults to gitlab.com) |
-| `LINEAR_API_KEY` | No | Linear API key for task sync |
-
----
-
-## Building from Source
-
-For contributors and development:
-
-```bash
-# Clone the repository
-git clone https://github.com/AndyMik90/Auto-Claude.git
-cd Auto-Claude
-
-# Install all dependencies
-npm run install:all
-
-# Run in development mode
-npm run dev
-
-# Or build and run
-npm start
-```
-
-**System requirements for building:**
-- Node.js 24+
-- Python 3.12+
-- npm 10+
-
-**Installing dependencies by platform:**
-
-
-Windows
-
-```bash
-winget install Python.Python.3.12
-winget install OpenJS.NodeJS.LTS
-```
-
-
-
-
-macOS
-
-```bash
-brew install python@3.12 node@24
-```
-
-
-
-
-Linux (Ubuntu/Debian)
-
-```bash
-sudo apt install python3.12 python3.12-venv
-curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash -
-sudo apt install -y nodejs
-```
-
-
-
-
-Linux (Fedora)
-
-```bash
-sudo dnf install python3.12 nodejs npm
-```
-
-
-
-See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed development setup.
-
-### Building Flatpak
-
-To build the Flatpak package, you need additional dependencies:
-
-```bash
-# Fedora/RHEL
-sudo dnf install flatpak-builder
-
-# Ubuntu/Debian
-sudo apt install flatpak-builder
-
-# Install required Flatpak runtimes
-flatpak install flathub org.freedesktop.Platform//25.08 org.freedesktop.Sdk//25.08
-flatpak install flathub org.electronjs.Electron2.BaseApp//25.08
-
-# Build the Flatpak
-cd apps/frontend
-npm run package:flatpak
-```
-
-The Flatpak will be created in `apps/frontend/dist/`.
+For Linux-specific builds (Flatpak, AppImage), see [guides/linux.md](guides/linux.md).
---
@@ -284,7 +179,7 @@ All releases are:
| `npm run package:mac` | Package for macOS |
| `npm run package:win` | Package for Windows |
| `npm run package:linux` | Package for Linux |
-| `npm run package:flatpak` | Package as Flatpak |
+| `npm run package:flatpak` | Package as Flatpak (see [guides/linux.md](guides/linux.md)) |
| `npm run lint` | Run linter |
| `npm test` | Run frontend tests |
| `npm run test:backend` | Run backend tests |
@@ -316,3 +211,11 @@ We welcome contributions! Please read [CONTRIBUTING.md](CONTRIBUTING.md) for:
Auto Claude is free to use. If you modify and distribute it, or run it as a service, your code must also be open source under AGPL-3.0.
Commercial licensing available for closed-source use cases.
+
+---
+
+## Star History
+
+[](https://github.com/AndyMik90/Auto-Claude/stargazers)
+
+[](https://star-history.com/#AndyMik90/Auto-Claude&Date)
diff --git a/RELEASE.md b/RELEASE.md
index d7f6eb10dd..21d0e6b53d 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -69,9 +69,38 @@ This will:
- Update `apps/frontend/package.json`
- Update `package.json` (root)
- Update `apps/backend/__init__.py`
+- Check if `CHANGELOG.md` has an entry for the new version (warns if missing)
- Create a commit with message `chore: bump version to X.Y.Z`
-### Step 2: Push and Create PR
+### Step 2: Update CHANGELOG.md (REQUIRED)
+
+**IMPORTANT: The release will fail if CHANGELOG.md doesn't have an entry for the new version.**
+
+Add release notes to `CHANGELOG.md` at the top of the file:
+
+```markdown
+## 2.8.0 - Your Release Title
+
+### ✨ New Features
+- Feature description
+
+### 🛠️ Improvements
+- Improvement description
+
+### 🐛 Bug Fixes
+- Fix description
+
+---
+```
+
+Then amend the version bump commit:
+
+```bash
+git add CHANGELOG.md
+git commit --amend --no-edit
+```
+
+### Step 3: Push and Create PR
```bash
# Push your branch
@@ -81,24 +110,25 @@ git push origin your-branch
gh pr create --base main --title "Release v2.8.0"
```
-### Step 3: Merge to Main
+### Step 4: Merge to Main
Once the PR is approved and merged to `main`, GitHub Actions will automatically:
1. **Detect the version bump** (`prepare-release.yml`)
-2. **Create a git tag** (e.g., `v2.8.0`)
-3. **Trigger the release workflow** (`release.yml`)
-4. **Build binaries** for all platforms:
+2. **Validate CHANGELOG.md** has an entry for the new version (FAILS if missing)
+3. **Extract release notes** from CHANGELOG.md
+4. **Create a git tag** (e.g., `v2.8.0`)
+5. **Trigger the release workflow** (`release.yml`)
+6. **Build binaries** for all platforms:
- macOS Intel (x64) - code signed & notarized
- macOS Apple Silicon (arm64) - code signed & notarized
- Windows (NSIS installer) - code signed
- Linux (AppImage + .deb)
-5. **Generate changelog** from merged PRs (using release-drafter)
-6. **Scan binaries** with VirusTotal
-7. **Create GitHub release** with all artifacts
-8. **Update README** with new version badge and download links
+7. **Scan binaries** with VirusTotal
+8. **Create GitHub release** with release notes from CHANGELOG.md
+9. **Update README** with new version badge and download links
-### Step 4: Verify
+### Step 5: Verify
After merging, check:
- [GitHub Actions](https://github.com/AndyMik90/Auto-Claude/actions) - ensure all workflows pass
@@ -113,28 +143,49 @@ We follow [Semantic Versioning](https://semver.org/):
- **MINOR** (0.X.0): New features, backwards compatible
- **PATCH** (0.0.X): Bug fixes, backwards compatible
-## Changelog Generation
+## Changelog Management
+
+Release notes are managed in `CHANGELOG.md` and used for GitHub releases.
+
+### Changelog Format
-Changelogs are automatically generated from merged PRs using [Release Drafter](https://github.com/release-drafter/release-drafter).
+Each version entry in `CHANGELOG.md` should follow this format:
-### PR Labels for Changelog Categories
+```markdown
+## X.Y.Z - Release Title
-| Label | Category |
-|-------|----------|
-| `feature`, `enhancement` | New Features |
-| `bug`, `fix` | Bug Fixes |
-| `improvement`, `refactor` | Improvements |
-| `documentation` | Documentation |
-| (any other) | Other Changes |
+### ✨ New Features
+- Feature description with context
-**Tip:** Add appropriate labels to your PRs for better changelog organization.
+### 🛠️ Improvements
+- Improvement description
+
+### 🐛 Bug Fixes
+- Fix description
+
+---
+```
+
+### Changelog Validation
+
+The release workflow **validates** that `CHANGELOG.md` has an entry for the version being released:
+
+- If the entry is **missing**, the release is **blocked** with a clear error message
+- If the entry **exists**, its content is used for the GitHub release notes (see the sketch below)
+
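+As a rough illustration only, a shell step along these lines could perform the validation and extraction; the actual commands in `prepare-release.yml` and `release.yml` may differ, and `VERSION` and `release-notes.md` are placeholder names here:
+
+```bash
+# Hypothetical sketch, not the actual workflow step
+VERSION=$(node -p "require('./package.json').version")
+
+# Validate: fail the job if CHANGELOG.md has no "## X.Y.Z" heading for this version
+if ! grep -q "^## ${VERSION}" CHANGELOG.md; then
+  echo "CHANGELOG VALIDATION FAILED: no entry for ${VERSION} in CHANGELOG.md" >&2
+  exit 1
+fi
+
+# Extract: print the entry body (up to the next "## " heading) as the release notes
+awk -v ver="$VERSION" '
+  $0 ~ "^## " ver {found=1; next}
+  found && /^## / {exit}
+  found {print}
+' CHANGELOG.md > release-notes.md
+```
+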
+### Writing Good Release Notes
+
+- **Be specific**: Instead of "Fixed bug", write "Fixed crash when opening large files"
+- **Group by impact**: Features first, then improvements, then fixes
+- **Credit contributors**: Mention contributors for significant changes
+- **Link issues**: Reference GitHub issues where relevant (e.g., "Fixes #123"); see the example entry below
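+
+Putting these guidelines together, a single note might look like this (illustrative values only):
+
+```markdown
+### 🐛 Bug Fixes
+- Fixed crash when opening files larger than 2 GB in the diff viewer (fixes #123, thanks @contributor)
+```
+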
## Workflows
| Workflow | Trigger | Purpose |
|----------|---------|---------|
-| `prepare-release.yml` | Push to `main` | Detects version bump, creates tag |
-| `release.yml` | Tag `v*` pushed | Builds binaries, creates release |
+| `prepare-release.yml` | Push to `main` | Detects version bump, **validates CHANGELOG.md**, creates tag |
+| `release.yml` | Tag `v*` pushed | Builds binaries, extracts changelog, creates release |
| `validate-version.yml` | Tag `v*` pushed | Validates tag matches package.json |
| `update-readme` (in release.yml) | After release | Updates README with new version |
@@ -153,6 +204,22 @@ Changelogs are automatically generated from merged PRs using [Release Drafter](h
git diff HEAD~1 --name-only | grep package.json
```
+### Release blocked: Missing changelog entry
+
+If you see "CHANGELOG VALIDATION FAILED" in the workflow:
+
+1. The `prepare-release.yml` workflow detected that `CHANGELOG.md` has no entry for the new version
+2. **Fix**: Add an entry to `CHANGELOG.md` with the format `## X.Y.Z - Title`
+3. Commit and push the changelog update
+4. The workflow runs again automatically when the fix is pushed to `main`
+
+```bash
+# Add changelog entry, then:
+git add CHANGELOG.md
+git commit -m "docs: add changelog for vX.Y.Z"
+git push origin main
+```
+
### Build failed after tag was created
- The release won't be published if builds fail
diff --git a/apps/backend/.env.example b/apps/backend/.env.example
index b481cf5b7d..bd56b9edd5 100644
--- a/apps/backend/.env.example
+++ b/apps/backend/.env.example
@@ -35,6 +35,39 @@
# Default: claude-opus-4-5-20251101
# AUTO_BUILD_MODEL=claude-opus-4-5-20251101
+# =============================================================================
+# MICROSOFT FOUNDRY / AZURE AI FOUNDRY (OPTIONAL)
+# =============================================================================
+# Use Claude models deployed on Microsoft Azure AI Foundry instead of the Anthropic API.
+# See: https://code.claude.com/docs/en/microsoft-foundry
+#
+# Prerequisites:
+# 1. Deploy Claude models in Azure AI Foundry portal
+# 2. Get your resource name and API key from "Endpoints and keys" section
+# 3. Configure Claude Code CLI first: claude config set --global provider azure
+#
+# Authentication Options:
+# Option A: API Key (set ANTHROPIC_FOUNDRY_API_KEY)
+# Option B: Microsoft Entra ID (run `az login`, no API key needed)
+
+# Enable Microsoft Foundry integration (REQUIRED for Azure)
+# CLAUDE_CODE_USE_FOUNDRY=1
+
+# Azure resource configuration (choose ONE):
+# Option 1: Resource name only (recommended)
+# ANTHROPIC_FOUNDRY_RESOURCE=your-resource-name
+
+# Option 2: Full base URL
+# ANTHROPIC_FOUNDRY_BASE_URL=https://your-resource-name.services.ai.azure.com
+
+# API Key (optional if using Entra ID authentication via `az login`)
+# ANTHROPIC_FOUNDRY_API_KEY=your-azure-api-key
+
+# Model deployment names (must match your Azure deployment names)
+# ANTHROPIC_DEFAULT_SONNET_MODEL=claude-sonnet-4-5
+# ANTHROPIC_DEFAULT_HAIKU_MODEL=claude-haiku-4-5
+# ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-1
+
# =============================================================================
# GIT/WORKTREE SETTINGS (OPTIONAL)
diff --git a/apps/backend/agents/README.md b/apps/backend/agents/README.md
index 1cf2b2fb81..85253eae26 100644
--- a/apps/backend/agents/README.md
+++ b/apps/backend/agents/README.md
@@ -26,7 +26,7 @@ auto-claude/agents/
### `utils.py` (3.6 KB)
- Git operations: `get_latest_commit()`, `get_commit_count()`
- Plan management: `load_implementation_plan()`, `find_subtask_in_plan()`, `find_phase_for_subtask()`
-- Workspace sync: `sync_plan_to_source()`
+- Workspace sync: `sync_spec_to_source()`
### `memory.py` (13 KB)
- Dual-layer memory system (Graphiti primary, file-based fallback)
@@ -73,7 +73,7 @@ from agents import (
# Utilities
get_latest_commit,
load_implementation_plan,
- sync_plan_to_source,
+ sync_spec_to_source,
)
```
diff --git a/apps/backend/agents/__init__.py b/apps/backend/agents/__init__.py
index 37dae174c4..4eed468607 100644
--- a/apps/backend/agents/__init__.py
+++ b/apps/backend/agents/__init__.py
@@ -14,6 +14,10 @@
Uses lazy imports to avoid circular dependencies.
"""
+# Explicit import required by CodeQL static analysis
+# (CodeQL doesn't recognize __getattr__ dynamic exports)
+from .utils import sync_spec_to_source
+
__all__ = [
# Main API
"run_autonomous_agent",
@@ -32,7 +36,7 @@
"load_implementation_plan",
"find_subtask_in_plan",
"find_phase_for_subtask",
- "sync_plan_to_source",
+ "sync_spec_to_source",
# Constants
"AUTO_CONTINUE_DELAY_SECONDS",
"HUMAN_INTERVENTION_FILE",
@@ -77,7 +81,7 @@ def __getattr__(name):
"get_commit_count",
"get_latest_commit",
"load_implementation_plan",
- "sync_plan_to_source",
+ "sync_spec_to_source",
):
from .utils import (
find_phase_for_subtask,
@@ -85,7 +89,7 @@ def __getattr__(name):
get_commit_count,
get_latest_commit,
load_implementation_plan,
- sync_plan_to_source,
+ sync_spec_to_source,
)
return locals()[name]
diff --git a/apps/backend/agents/coder.py b/apps/backend/agents/coder.py
index 39d43b30a0..863aef1c7d 100644
--- a/apps/backend/agents/coder.py
+++ b/apps/backend/agents/coder.py
@@ -7,6 +7,7 @@
import asyncio
import logging
+import os
from pathlib import Path
from core.client import create_client
@@ -37,6 +38,7 @@
)
from prompts import is_first_run
from recovery import RecoveryManager
+from security.constants import PROJECT_DIR_ENV_VAR
from task_logger import (
LogPhase,
get_task_logger,
@@ -62,7 +64,7 @@
get_commit_count,
get_latest_commit,
load_implementation_plan,
- sync_plan_to_source,
+ sync_spec_to_source,
)
logger = logging.getLogger(__name__)
@@ -90,6 +92,10 @@ async def run_autonomous_agent(
verbose: Whether to show detailed output
source_spec_dir: Original spec directory in main project (for syncing from worktree)
"""
+ # Set environment variable for security hooks to find the correct project directory
+ # This is needed because os.getcwd() may return the wrong directory in worktree mode
+ os.environ[PROJECT_DIR_ENV_VAR] = str(project_dir.resolve())
+
# Initialize recovery manager (handles memory persistence)
recovery_manager = RecoveryManager(spec_dir, project_dir)
@@ -404,7 +410,7 @@ async def run_autonomous_agent(
print_status("Linear notified of stuck subtask", "info")
elif is_planning_phase and source_spec_dir:
# After planning phase, sync the newly created implementation plan back to source
- if sync_plan_to_source(spec_dir, source_spec_dir):
+ if sync_spec_to_source(spec_dir, source_spec_dir):
print_status("Implementation plan synced to main project", "success")
# Handle session status
diff --git a/apps/backend/agents/session.py b/apps/backend/agents/session.py
index 89a5d5d48c..263bf17efb 100644
--- a/apps/backend/agents/session.py
+++ b/apps/backend/agents/session.py
@@ -40,7 +40,7 @@
get_commit_count,
get_latest_commit,
load_implementation_plan,
- sync_plan_to_source,
+ sync_spec_to_source,
)
logger = logging.getLogger(__name__)
@@ -82,7 +82,7 @@ async def post_session_processing(
print(muted("--- Post-Session Processing ---"))
# Sync implementation plan back to source (for worktree mode)
- if sync_plan_to_source(spec_dir, source_spec_dir):
+ if sync_spec_to_source(spec_dir, source_spec_dir):
print_status("Implementation plan synced to main project", "success")
# Check if implementation plan was updated
@@ -445,8 +445,9 @@ async def run_agent_session(
result_content = getattr(block, "content", "")
is_error = getattr(block, "is_error", False)
- # Check if command was blocked by security hook
- if "blocked" in str(result_content).lower():
+ # Check if this is an error (not just content containing "blocked")
+ if is_error and "blocked" in str(result_content).lower():
+ # Actual blocked command by security hook
debug_error(
"session",
f"Tool BLOCKED: {current_tool}",
diff --git a/apps/backend/agents/tools_pkg/tools/memory.py b/apps/backend/agents/tools_pkg/tools/memory.py
index ac361ab78c..b5367663e9 100644
--- a/apps/backend/agents/tools_pkg/tools/memory.py
+++ b/apps/backend/agents/tools_pkg/tools/memory.py
@@ -4,9 +4,16 @@
Tools for recording and retrieving session memory, including discoveries,
gotchas, and patterns.
+
+Dual-storage approach:
+- File-based: Always available, works offline, spec-specific
+- LadybugDB: When Graphiti is enabled, also saves to graph database for
+ cross-session retrieval and Memory UI display
"""
+import asyncio
import json
+import logging
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
@@ -19,6 +26,108 @@
SDK_TOOLS_AVAILABLE = False
tool = None
+logger = logging.getLogger(__name__)
+
+
+async def _save_to_graphiti_async(
+ spec_dir: Path,
+ project_dir: Path,
+ save_type: str,
+ data: dict,
+) -> bool:
+ """
+ Save data to Graphiti/LadybugDB (async implementation).
+
+ Args:
+ spec_dir: Spec directory for GraphitiMemory initialization
+ project_dir: Project root directory
+ save_type: Type of save - 'discovery', 'gotcha', or 'pattern'
+ data: Data to save
+
+ Returns:
+ True if save succeeded, False otherwise
+ """
+ try:
+ # Check if Graphiti is enabled
+ from graphiti_config import is_graphiti_enabled
+
+ if not is_graphiti_enabled():
+ return False
+
+ from integrations.graphiti.queries_pkg.graphiti import GraphitiMemory
+
+ memory = GraphitiMemory(spec_dir, project_dir)
+ try:
+ if save_type == "discovery":
+ # Save as codebase discovery
+ # Format: {file_path: description}
+ result = await memory.save_codebase_discoveries(
+ {data["file_path"]: data["description"]}
+ )
+ elif save_type == "gotcha":
+ # Save as gotcha
+ gotcha_text = data["gotcha"]
+ if data.get("context"):
+ gotcha_text += f" (Context: {data['context']})"
+ result = await memory.save_gotcha(gotcha_text)
+ elif save_type == "pattern":
+ # Save as pattern
+ result = await memory.save_pattern(data["pattern"])
+ else:
+ result = False
+ return result
+ finally:
+ await memory.close()
+
+ except ImportError as e:
+ logger.debug(f"Graphiti not available for memory tools: {e}")
+ return False
+ except Exception as e:
+ logger.warning(f"Failed to save to Graphiti: {e}")
+ return False
+
+
+def _save_to_graphiti_sync(
+ spec_dir: Path,
+ project_dir: Path,
+ save_type: str,
+ data: dict,
+) -> bool:
+ """
+ Save data to Graphiti/LadybugDB (synchronous wrapper for sync contexts only).
+
+ NOTE: This should only be called from synchronous code. For async callers,
+ use _save_to_graphiti_async() directly to ensure proper resource cleanup.
+
+ Args:
+ spec_dir: Spec directory for GraphitiMemory initialization
+ project_dir: Project root directory
+ save_type: Type of save - 'discovery', 'gotcha', or 'pattern'
+ data: Data to save
+
+ Returns:
+ True if save succeeded, False otherwise
+ """
+ try:
+ # Check if we're already in an async context
+ try:
+ asyncio.get_running_loop()
+ # We're in an async context - caller should use _save_to_graphiti_async
+ # Log a warning and return False to avoid the resource leak bug
+ logger.warning(
+ "_save_to_graphiti_sync called from async context. "
+ "Use _save_to_graphiti_async instead for proper cleanup."
+ )
+ return False
+ except RuntimeError:
+ # No running loop - safe to create one
+ return asyncio.run(
+ _save_to_graphiti_async(spec_dir, project_dir, save_type, data)
+ )
+ except Exception as e:
+ logger.warning(f"Failed to save to Graphiti: {e}")
+ return False
+
def create_memory_tools(spec_dir: Path, project_dir: Path) -> list:
"""
@@ -45,7 +154,7 @@ def create_memory_tools(spec_dir: Path, project_dir: Path) -> list:
{"file_path": str, "description": str, "category": str},
)
async def record_discovery(args: dict[str, Any]) -> dict[str, Any]:
- """Record a discovery to the codebase map."""
+ """Record a discovery to the codebase map (file + Graphiti)."""
file_path = args["file_path"]
description = args["description"]
category = args.get("category", "general")
@@ -54,8 +163,10 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]:
memory_dir.mkdir(exist_ok=True)
codebase_map_file = memory_dir / "codebase_map.json"
+ saved_to_graphiti = False
try:
+ # PRIMARY: Save to file-based storage (always works)
# Load existing map or create new
if codebase_map_file.exists():
with open(codebase_map_file) as f:
@@ -77,11 +188,23 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]:
with open(codebase_map_file, "w") as f:
json.dump(codebase_map, f, indent=2)
+ # SECONDARY: Also save to Graphiti/LadybugDB (for Memory UI)
+ saved_to_graphiti = await _save_to_graphiti_async(
+ spec_dir,
+ project_dir,
+ "discovery",
+ {
+ "file_path": file_path,
+ "description": f"[{category}] {description}",
+ },
+ )
+
+ storage_note = " (also saved to memory graph)" if saved_to_graphiti else ""
return {
"content": [
{
"type": "text",
- "text": f"Recorded discovery for '{file_path}': {description}",
+ "text": f"Recorded discovery for '{file_path}': {description}{storage_note}",
}
]
}
@@ -102,7 +225,7 @@ async def record_discovery(args: dict[str, Any]) -> dict[str, Any]:
{"gotcha": str, "context": str},
)
async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]:
- """Record a gotcha to session memory."""
+ """Record a gotcha to session memory (file + Graphiti)."""
gotcha = args["gotcha"]
context = args.get("context", "")
@@ -110,8 +233,10 @@ async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]:
memory_dir.mkdir(exist_ok=True)
gotchas_file = memory_dir / "gotchas.md"
+ saved_to_graphiti = False
try:
+ # PRIMARY: Save to file-based storage (always works)
timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M")
entry = f"\n## [{timestamp}]\n{gotcha}"
@@ -126,7 +251,20 @@ async def record_gotcha(args: dict[str, Any]) -> dict[str, Any]:
)
f.write(entry)
- return {"content": [{"type": "text", "text": f"Recorded gotcha: {gotcha}"}]}
+ # SECONDARY: Also save to Graphiti/LadybugDB (for Memory UI)
+ saved_to_graphiti = await _save_to_graphiti_async(
+ spec_dir,
+ project_dir,
+ "gotcha",
+ {"gotcha": gotcha, "context": context},
+ )
+
+ storage_note = " (also saved to memory graph)" if saved_to_graphiti else ""
+ return {
+ "content": [
+ {"type": "text", "text": f"Recorded gotcha: {gotcha}{storage_note}"}
+ ]
+ }
except Exception as e:
return {
diff --git a/apps/backend/agents/utils.py b/apps/backend/agents/utils.py
index 8ce33c9224..614cdb795a 100644
--- a/apps/backend/agents/utils.py
+++ b/apps/backend/agents/utils.py
@@ -8,40 +8,38 @@
import json
import logging
import shutil
-import subprocess
from pathlib import Path
+from core.git_executable import run_git
+
logger = logging.getLogger(__name__)
def get_latest_commit(project_dir: Path) -> str | None:
"""Get the hash of the latest git commit."""
- try:
- result = subprocess.run(
- ["git", "rev-parse", "HEAD"],
- cwd=project_dir,
- capture_output=True,
- text=True,
- check=True,
- )
+ result = run_git(
+ ["rev-parse", "HEAD"],
+ cwd=project_dir,
+ timeout=10,
+ )
+ if result.returncode == 0:
return result.stdout.strip()
- except subprocess.CalledProcessError:
- return None
+ return None
def get_commit_count(project_dir: Path) -> int:
"""Get the total number of commits."""
- try:
- result = subprocess.run(
- ["git", "rev-list", "--count", "HEAD"],
- cwd=project_dir,
- capture_output=True,
- text=True,
- check=True,
- )
- return int(result.stdout.strip())
- except (subprocess.CalledProcessError, ValueError):
- return 0
+ result = run_git(
+ ["rev-list", "--count", "HEAD"],
+ cwd=project_dir,
+ timeout=10,
+ )
+ if result.returncode == 0:
+ try:
+ return int(result.stdout.strip())
+ except ValueError:
+ return 0
+ return 0
def load_implementation_plan(spec_dir: Path) -> dict | None:
@@ -74,16 +72,32 @@ def find_phase_for_subtask(plan: dict, subtask_id: str) -> dict | None:
return None
-def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool:
+def sync_spec_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool:
"""
- Sync implementation_plan.json from worktree back to source spec directory.
-
- When running in isolated mode (worktrees), the agent updates the implementation
- plan inside the worktree. This function syncs those changes back to the main
- project's spec directory so the frontend/UI can see the progress.
+ Sync ALL spec files from worktree back to source spec directory.
+
+ When running in isolated mode (worktrees), the agent creates and updates
+ many files inside the worktree's spec directory. This function syncs ALL
+ of them back to the main project's spec directory.
+
+ IMPORTANT: Since .auto-claude/ is gitignored, this sync happens to the
+ local filesystem regardless of what branch the user is on. The worktree
+ may be on a different branch (e.g., auto-claude/093-task), but the sync
+ target is always the main project's .auto-claude/specs/ directory.
+
+ Files synced (all files in spec directory):
+ - implementation_plan.json - Task status and subtask completion
+ - build-progress.txt - Session-by-session progress notes
+ - task_logs.json - Execution logs
+ - review_state.json - QA review state
+ - critique_report.json - Spec critique findings
+ - suggested_commit_message.txt - Commit suggestions
+ - REGRESSION_TEST_REPORT.md - Test regression report
+ - spec.md, context.json, etc. - Original spec files (for completeness)
+ - memory/ directory - Codebase map, patterns, gotchas, session insights
Args:
- spec_dir: Current spec directory (may be inside worktree)
+ spec_dir: Current spec directory (inside worktree)
source_spec_dir: Original spec directory in main project (outside worktree)
Returns:
@@ -100,17 +114,68 @@ def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool:
if spec_dir_resolved == source_spec_dir_resolved:
return False # Same directory, no sync needed
- # Sync the implementation plan
- plan_file = spec_dir / "implementation_plan.json"
- if not plan_file.exists():
- return False
+ synced_any = False
- source_plan_file = source_spec_dir / "implementation_plan.json"
+ # Ensure source directory exists
+ source_spec_dir.mkdir(parents=True, exist_ok=True)
try:
- shutil.copy2(plan_file, source_plan_file)
- logger.debug(f"Synced implementation plan to source: {source_plan_file}")
- return True
+ # Sync all files and directories from worktree spec to source spec
+ for item in spec_dir.iterdir():
+ # Skip symlinks to prevent path traversal attacks
+ if item.is_symlink():
+ logger.warning(f"Skipping symlink during sync: {item.name}")
+ continue
+
+ source_item = source_spec_dir / item.name
+
+ if item.is_file():
+ # Copy file (preserves timestamps)
+ shutil.copy2(item, source_item)
+ logger.debug(f"Synced {item.name} to source")
+ synced_any = True
+
+ elif item.is_dir():
+ # Recursively sync directory
+ _sync_directory(item, source_item)
+ synced_any = True
+
except Exception as e:
- logger.warning(f"Failed to sync implementation plan to source: {e}")
- return False
+ logger.warning(f"Failed to sync spec directory to source: {e}")
+
+ return synced_any
+
+
+def _sync_directory(source_dir: Path, target_dir: Path) -> None:
+ """
+ Recursively sync a directory from source to target.
+
+ Args:
+ source_dir: Source directory (in worktree)
+ target_dir: Target directory (in main project)
+ """
+ # Create target directory if needed
+ target_dir.mkdir(parents=True, exist_ok=True)
+
+ for item in source_dir.iterdir():
+ # Skip symlinks to prevent path traversal attacks
+ if item.is_symlink():
+ logger.warning(
+ f"Skipping symlink during sync: {source_dir.name}/{item.name}"
+ )
+ continue
+
+ target_item = target_dir / item.name
+
+ if item.is_file():
+ shutil.copy2(item, target_item)
+ logger.debug(f"Synced {source_dir.name}/{item.name} to source")
+ elif item.is_dir():
+ # Recurse into subdirectories
+ _sync_directory(item, target_item)
+
+
+# Keep the old name as an alias for backward compatibility
+def sync_plan_to_source(spec_dir: Path, source_spec_dir: Path | None) -> bool:
+ """Alias for sync_spec_to_source for backward compatibility."""
+ return sync_spec_to_source(spec_dir, source_spec_dir)
diff --git a/apps/backend/analysis/insight_extractor.py b/apps/backend/analysis/insight_extractor.py
index 75974d6b59..7b461afbae 100644
--- a/apps/backend/analysis/insight_extractor.py
+++ b/apps/backend/analysis/insight_extractor.py
@@ -387,12 +387,40 @@ async def run_insight_extraction(
# Collect the response
response_text = ""
+ message_count = 0
+ text_blocks_found = 0
+
async for msg in client.receive_response():
msg_type = type(msg).__name__
+ message_count += 1
+
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
- response_text += block.text
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
+ text_blocks_found += 1
+ if block.text: # Only add non-empty text
+ response_text += block.text
+ else:
+ logger.debug(
+ f"Found empty TextBlock in response (block #{text_blocks_found})"
+ )
+
+ # Log response collection summary
+ logger.debug(
+ f"Insight extraction response: {message_count} messages, "
+ f"{text_blocks_found} text blocks, {len(response_text)} chars collected"
+ )
+
+ # Validate we received content before parsing
+ if not response_text.strip():
+ logger.warning(
+ f"Insight extraction returned empty response. "
+ f"Messages received: {message_count}, TextBlocks found: {text_blocks_found}. "
+ f"This may indicate the AI model did not respond with text content."
+ )
+ return None
# Parse JSON from response
return parse_insights(response_text)
@@ -415,6 +443,11 @@ def parse_insights(response_text: str) -> dict | None:
# Try to extract JSON from the response
text = response_text.strip()
+ # Early validation - check for empty response
+ if not text:
+ logger.warning("Cannot parse insights: response text is empty")
+ return None
+
# Handle markdown code blocks
if text.startswith("```"):
# Remove code block markers
@@ -422,17 +455,26 @@ def parse_insights(response_text: str) -> dict | None:
# Remove first line (```json or ```)
if lines[0].startswith("```"):
lines = lines[1:]
- # Remove last line if it's ``
+ # Remove last line if it's ```
if lines and lines[-1].strip() == "```":
lines = lines[:-1]
- text = "\n".join(lines)
+ text = "\n".join(lines).strip()
+
+ # Check again after removing code blocks
+ if not text:
+ logger.warning(
+ "Cannot parse insights: response contained only markdown code block markers with no content"
+ )
+ return None
try:
insights = json.loads(text)
# Validate structure
if not isinstance(insights, dict):
- logger.warning("Insights is not a dict")
+ logger.warning(
+ f"Insights is not a dict, got type: {type(insights).__name__}"
+ )
return None
# Ensure required keys exist with defaults
@@ -446,7 +488,13 @@ def parse_insights(response_text: str) -> dict | None:
except json.JSONDecodeError as e:
logger.warning(f"Failed to parse insights JSON: {e}")
- logger.debug(f"Response text was: {text[:500]}")
+ # Show more context in the error message
+ preview_length = min(500, len(text))
+ logger.warning(
+ f"Response text preview (first {preview_length} chars): {text[:preview_length]}"
+ )
+ if len(text) > preview_length:
+ logger.warning(f"... (total length: {len(text)} chars)")
return None
diff --git a/apps/backend/cli/batch_commands.py b/apps/backend/cli/batch_commands.py
index 28a82ea90a..959df5eeac 100644
--- a/apps/backend/cli/batch_commands.py
+++ b/apps/backend/cli/batch_commands.py
@@ -6,6 +6,8 @@
"""
import json
+import shutil
+import subprocess
from pathlib import Path
from ui import highlight, print_status
@@ -184,7 +186,7 @@ def handle_batch_cleanup_command(project_dir: str, dry_run: bool = True) -> bool
True if successful
"""
specs_dir = Path(project_dir) / ".auto-claude" / "specs"
- worktrees_dir = Path(project_dir) / ".worktrees"
+ worktrees_dir = Path(project_dir) / ".auto-claude" / "worktrees" / "tasks"
if not specs_dir.exists():
print_status("No specs directory found", "info")
@@ -209,8 +211,56 @@ def handle_batch_cleanup_command(project_dir: str, dry_run: bool = True) -> bool
print(f" - {spec_name}")
wt_path = worktrees_dir / spec_name
if wt_path.exists():
- print(f" └─ .worktrees/{spec_name}/")
+ print(f" └─ .auto-claude/worktrees/tasks/{spec_name}/")
print()
print("Run with --no-dry-run to actually delete")
+ else:
+ # Actually delete specs and worktrees
+ deleted_count = 0
+ for spec_name in completed:
+ spec_path = specs_dir / spec_name
+ wt_path = worktrees_dir / spec_name
+
+ # Remove worktree first (if exists)
+ if wt_path.exists():
+ try:
+ result = subprocess.run(
+ ["git", "worktree", "remove", "--force", str(wt_path)],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ timeout=30,
+ )
+ if result.returncode == 0:
+ print_status(f"Removed worktree: {spec_name}", "success")
+ else:
+ # Fallback: remove directory manually if git fails
+ shutil.rmtree(wt_path, ignore_errors=True)
+ print_status(
+ f"Removed worktree directory: {spec_name}", "success"
+ )
+ except subprocess.TimeoutExpired:
+ # Timeout: fall back to manual removal
+ shutil.rmtree(wt_path, ignore_errors=True)
+ print_status(
+ f"Worktree removal timed out, removed directory: {spec_name}",
+ "warning",
+ )
+ except Exception as e:
+ print_status(
+ f"Failed to remove worktree {spec_name}: {e}", "warning"
+ )
+
+ # Remove spec directory
+ if spec_path.exists():
+ try:
+ shutil.rmtree(spec_path)
+ print_status(f"Removed spec: {spec_name}", "success")
+ deleted_count += 1
+ except Exception as e:
+ print_status(f"Failed to remove spec {spec_name}: {e}", "error")
+
+ print()
+ print_status(f"Cleaned up {deleted_count} spec(s)", "info")
return True
diff --git a/apps/backend/cli/build_commands.py b/apps/backend/cli/build_commands.py
index 19dc17ca6b..ad5766ac54 100644
--- a/apps/backend/cli/build_commands.py
+++ b/apps/backend/cli/build_commands.py
@@ -79,7 +79,7 @@ def handle_build_command(
base_branch: Base branch for worktree creation (default: current branch)
"""
# Lazy imports to avoid loading heavy modules
- from agent import run_autonomous_agent, sync_plan_to_source
+ from agent import run_autonomous_agent, sync_spec_to_source
from debug import (
debug,
debug_info,
@@ -274,7 +274,7 @@ def handle_build_command(
# Sync implementation plan to main project after QA
# This ensures the main project has the latest status (human_review)
- if sync_plan_to_source(spec_dir, source_spec_dir):
+ if sync_spec_to_source(spec_dir, source_spec_dir):
debug_info(
"run.py", "Implementation plan synced to main project after QA"
)
diff --git a/apps/backend/cli/main.py b/apps/backend/cli/main.py
index 9b910b5311..cfb6a6a414 100644
--- a/apps/backend/cli/main.py
+++ b/apps/backend/cli/main.py
@@ -38,6 +38,7 @@
)
from .workspace_commands import (
handle_cleanup_worktrees_command,
+ handle_create_pr_command,
handle_discard_command,
handle_list_worktrees_command,
handle_merge_command,
@@ -153,6 +154,30 @@ def parse_args() -> argparse.Namespace:
action="store_true",
help="Discard an existing build (requires confirmation)",
)
+ build_group.add_argument(
+ "--create-pr",
+ action="store_true",
+ help="Push branch and create a GitHub Pull Request",
+ )
+
+ # PR options
+ parser.add_argument(
+ "--pr-target",
+ type=str,
+ metavar="BRANCH",
+ help="With --create-pr: target branch for PR (default: auto-detect)",
+ )
+ parser.add_argument(
+ "--pr-title",
+ type=str,
+ metavar="TITLE",
+ help="With --create-pr: custom PR title (default: generated from spec name)",
+ )
+ parser.add_argument(
+ "--pr-draft",
+ action="store_true",
+ help="With --create-pr: create as draft PR",
+ )
# Merge options
parser.add_argument(
@@ -365,6 +390,21 @@ def main() -> None:
handle_discard_command(project_dir, spec_dir.name)
return
+ if args.create_pr:
+ # Pass args.pr_target directly - WorktreeManager._detect_base_branch
+ # handles base branch detection internally when target_branch is None
+ result = handle_create_pr_command(
+ project_dir=project_dir,
+ spec_name=spec_dir.name,
+ target_branch=args.pr_target,
+ title=args.pr_title,
+ draft=args.pr_draft,
+ )
+ # JSON output is already printed by handle_create_pr_command
+ if not result.get("success"):
+ sys.exit(1)
+ return
+
# Handle QA commands
if args.qa_status:
handle_qa_status_command(spec_dir)
diff --git a/apps/backend/cli/utils.py b/apps/backend/cli/utils.py
index f18954654a..0e2a7b427a 100644
--- a/apps/backend/cli/utils.py
+++ b/apps/backend/cli/utils.py
@@ -15,7 +15,47 @@
sys.path.insert(0, str(_PARENT_DIR))
from core.auth import get_auth_token, get_auth_token_source
-from dotenv import load_dotenv
+from core.dependency_validator import validate_platform_dependencies
+
+
+def import_dotenv():
+ """
+ Import and return load_dotenv with helpful error message if not installed.
+
+ This centralized function ensures consistent error messaging across all
+ runner scripts when python-dotenv is not available.
+
+ Returns:
+ The load_dotenv function
+
+ Raises:
+ SystemExit: If dotenv cannot be imported, with helpful installation instructions.
+ """
+ try:
+ from dotenv import load_dotenv as _load_dotenv
+
+ return _load_dotenv
+ except ImportError:
+ sys.exit(
+ "Error: Required Python package 'python-dotenv' is not installed.\n"
+ "\n"
+ "This usually means you're not using the virtual environment.\n"
+ "\n"
+ "To fix this:\n"
+ "1. From the 'apps/backend/' directory, activate the venv:\n"
+ " source .venv/bin/activate # Linux/macOS\n"
+ " .venv\\Scripts\\activate # Windows\n"
+ "\n"
+ "2. Or install dependencies directly:\n"
+ " pip install python-dotenv\n"
+ " pip install -r requirements.txt\n"
+ "\n"
+ f"Current Python: {sys.executable}\n"
+ )
+
+
+# Resolve load_dotenv, exiting with a helpful error if python-dotenv is not installed
+load_dotenv = import_dotenv()
from graphiti_config import get_graphiti_status
from linear_integration import LinearManager
from linear_updater import is_linear_enabled
@@ -28,8 +68,8 @@
muted,
)
-# Configuration
-DEFAULT_MODEL = "claude-opus-4-5-20251101"
+# Configuration - uses shorthand that resolves via API Profile if configured
+DEFAULT_MODEL = "sonnet" # Changed from "opus" (fix #433)
def setup_environment() -> Path:
@@ -82,7 +122,7 @@ def find_spec(project_dir: Path, spec_identifier: str) -> Path | None:
return spec_folder
# Check worktree specs (for merge-preview, merge, review, discard operations)
- worktree_base = project_dir / ".worktrees"
+ worktree_base = project_dir / ".auto-claude" / "worktrees" / "tasks"
if worktree_base.exists():
# Try exact match in worktree
worktree_spec = (
@@ -115,6 +155,9 @@ def validate_environment(spec_dir: Path) -> bool:
Returns:
True if valid, False otherwise (with error messages printed)
"""
+ # Validate platform-specific dependencies first (exits if missing)
+ validate_platform_dependencies()
+
valid = True
# Check for OAuth token (API keys are not supported)
diff --git a/apps/backend/cli/workspace_commands.py b/apps/backend/cli/workspace_commands.py
index 5e3d68a5aa..85f9f7327d 100644
--- a/apps/backend/cli/workspace_commands.py
+++ b/apps/backend/cli/workspace_commands.py
@@ -5,6 +5,7 @@
CLI commands for workspace management (merge, review, discard, list, cleanup)
"""
+import json
import subprocess
import sys
from pathlib import Path
@@ -22,6 +23,8 @@
get_merge_base,
is_lock_file,
)
+from core.worktree import PushAndCreatePRResult as CreatePRResult
+from core.worktree import WorktreeManager
from debug import debug_warning
from ui import (
Icons,
@@ -30,6 +33,7 @@
from workspace import (
cleanup_all_worktrees,
discard_existing_build,
+ get_existing_build_worktree,
list_all_worktrees,
merge_existing_build,
review_existing_build,
@@ -67,6 +71,7 @@ def _detect_default_branch(project_dir: Path) -> str:
cwd=project_dir,
capture_output=True,
text=True,
+ timeout=5,
)
if result.returncode == 0:
return env_branch
@@ -78,6 +83,7 @@ def _detect_default_branch(project_dir: Path) -> str:
cwd=project_dir,
capture_output=True,
text=True,
+ timeout=5,
)
if result.returncode == 0:
return branch
@@ -90,18 +96,32 @@ def _get_changed_files_from_git(
worktree_path: Path, base_branch: str = "main"
) -> list[str]:
"""
- Get list of changed files from git diff between base branch and HEAD.
+ Get list of files changed by the task (not files changed on base branch).
+
+ Uses merge-base to accurately identify only the files modified in the worktree,
+ not files that changed on the base branch since the worktree was created.
Args:
worktree_path: Path to the worktree
base_branch: Base branch to compare against (default: main)
Returns:
- List of changed file paths
+ List of changed file paths (task changes only)
"""
try:
+ # First, get the merge-base (the point where the worktree branched)
+ merge_base_result = subprocess.run(
+ ["git", "merge-base", base_branch, "HEAD"],
+ cwd=worktree_path,
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ merge_base = merge_base_result.stdout.strip()
+
+ # Use two-dot diff from merge-base to get only task's changes
result = subprocess.run(
- ["git", "diff", "--name-only", f"{base_branch}...HEAD"],
+ ["git", "diff", "--name-only", f"{merge_base}..HEAD"],
cwd=worktree_path,
capture_output=True,
text=True,
@@ -113,10 +133,10 @@ def _get_changed_files_from_git(
# Log the failure before trying fallback
debug_warning(
"workspace_commands",
- f"git diff (three-dot) failed: returncode={e.returncode}, "
+ f"git diff with merge-base failed: returncode={e.returncode}, "
f"stderr={e.stderr.strip() if e.stderr else 'N/A'}",
)
- # Fallback: try without the three-dot notation
+ # Fallback: try direct two-arg diff (less accurate but works)
try:
result = subprocess.run(
["git", "diff", "--name-only", base_branch, "HEAD"],
@@ -131,12 +151,176 @@ def _get_changed_files_from_git(
# Log the failure before returning empty list
debug_warning(
"workspace_commands",
- f"git diff (two-arg) failed: returncode={e.returncode}, "
+ f"git diff (fallback) failed: returncode={e.returncode}, "
f"stderr={e.stderr.strip() if e.stderr else 'N/A'}",
)
return []
+def _detect_worktree_base_branch(
+ project_dir: Path,
+ worktree_path: Path,
+ spec_name: str,
+) -> str | None:
+ """
+ Detect which branch a worktree was created from.
+
+ Tries multiple strategies:
+ 1. Check worktree config file (.auto-claude/worktree-config.json)
+ 2. Find merge-base with known branches (develop, main, master)
+ 3. Return None if unable to detect
+
+ Args:
+ project_dir: Project root directory
+ worktree_path: Path to the worktree
+ spec_name: Name of the spec
+
+ Returns:
+ The detected base branch name, or None if unable to detect
+ """
+ # Strategy 1: Check for worktree config file
+ config_path = worktree_path / ".auto-claude" / "worktree-config.json"
+ if config_path.exists():
+ try:
+ config = json.loads(config_path.read_text())
+ if config.get("base_branch"):
+ debug(
+ MODULE,
+ f"Found base branch in worktree config: {config['base_branch']}",
+ )
+ return config["base_branch"]
+ except Exception as e:
+ debug_warning(MODULE, f"Failed to read worktree config: {e}")
+
+ # Strategy 2: Find which branch has the closest merge-base
+ # Check common branches: develop, main, master
+ spec_branch = f"auto-claude/{spec_name}"
+ candidate_branches = ["develop", "main", "master"]
+
+ best_branch = None
+ best_commits_behind = float("inf")
+
+ for branch in candidate_branches:
+ try:
+ # Check if branch exists
+ check = subprocess.run(
+ ["git", "rev-parse", "--verify", branch],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ )
+ if check.returncode != 0:
+ continue
+
+ # Get merge base
+ merge_base_result = subprocess.run(
+ ["git", "merge-base", branch, spec_branch],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ )
+ if merge_base_result.returncode != 0:
+ continue
+
+ merge_base = merge_base_result.stdout.strip()
+
+ # Count commits between merge-base and branch tip
+ # The branch with fewer commits ahead is likely the one we branched from
+ ahead_result = subprocess.run(
+ ["git", "rev-list", "--count", f"{merge_base}..{branch}"],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ )
+ if ahead_result.returncode == 0:
+ commits_ahead = int(ahead_result.stdout.strip())
+ debug(
+ MODULE,
+ f"Branch {branch} is {commits_ahead} commits ahead of merge-base",
+ )
+ if commits_ahead < best_commits_behind:
+ best_commits_behind = commits_ahead
+ best_branch = branch
+ except Exception as e:
+ debug_warning(MODULE, f"Error checking branch {branch}: {e}")
+ continue
+
+ if best_branch:
+ debug(
+ MODULE,
+ f"Detected base branch from git history: {best_branch} (commits ahead: {best_commits_behind})",
+ )
+ return best_branch
+
+ return None
+
+
+def _detect_parallel_task_conflicts(
+ project_dir: Path,
+ current_task_id: str,
+ current_task_files: list[str],
+) -> list[dict]:
+ """
+ Detect potential conflicts between this task and other active tasks.
+
+ Uses existing evolution data to check if any of this task's files
+ have been modified by other active tasks. This is a lightweight check
+ that doesn't require re-processing all files.
+
+ Args:
+ project_dir: Project root directory
+ current_task_id: ID of the current task
+ current_task_files: Files modified by this task (from git diff)
+
+ Returns:
+ List of conflict dictionaries with 'file' and 'tasks' keys
+ """
+ try:
+ from merge import MergeOrchestrator
+
+ # Initialize orchestrator just to access evolution data
+ orchestrator = MergeOrchestrator(
+ project_dir,
+ enable_ai=False,
+ dry_run=True,
+ )
+
+ # Get all active tasks from evolution data
+ active_tasks = orchestrator.evolution_tracker.get_active_tasks()
+
+ # Remove current task from active tasks
+ other_active_tasks = active_tasks - {current_task_id}
+
+ if not other_active_tasks:
+ return []
+
+ # Convert current task files to a set for fast lookup
+ current_files_set = set(current_task_files)
+
+ # Get files modified by other active tasks
+ conflicts = []
+ other_task_files = orchestrator.evolution_tracker.get_files_modified_by_tasks(
+ list(other_active_tasks)
+ )
+
+ # Find intersection - files modified by both this task and other tasks
+ for file_path, tasks in other_task_files.items():
+ if file_path in current_files_set:
+ # This file was modified by both current task and other task(s)
+ all_tasks = [current_task_id] + tasks
+ conflicts.append({"file": file_path, "tasks": all_tasks})
+
+ return conflicts
+
+ except Exception as e:
+ # If anything fails, just return empty - parallel task detection is optional
+ debug_warning(
+ "workspace_commands",
+ f"Parallel task conflict detection failed: {e}",
+ )
+ return []
+
+
# Import debug utilities
try:
from debug import (
@@ -352,7 +536,9 @@ def handle_cleanup_worktrees_command(project_dir: Path) -> None:
cleanup_all_worktrees(project_dir, confirm=True)
-def _check_git_merge_conflicts(project_dir: Path, spec_name: str) -> dict:
+def _check_git_merge_conflicts(
+ project_dir: Path, spec_name: str, base_branch: str | None = None
+) -> dict:
"""
Check for git-level merge conflicts WITHOUT modifying the working directory.
@@ -362,6 +548,7 @@ def _check_git_merge_conflicts(project_dir: Path, spec_name: str) -> dict:
Args:
project_dir: Project root directory
spec_name: Name of the spec
+ base_branch: Branch the task was created from (default: auto-detect)
Returns:
Dictionary with git conflict information:
@@ -380,21 +567,25 @@ def _check_git_merge_conflicts(project_dir: Path, spec_name: str) -> dict:
"has_conflicts": False,
"conflicting_files": [],
"needs_rebase": False,
- "base_branch": "main",
+ "base_branch": base_branch or "main",
"spec_branch": spec_branch,
"commits_behind": 0,
}
try:
- # Get the current branch (base branch)
- base_result = subprocess.run(
- ["git", "rev-parse", "--abbrev-ref", "HEAD"],
- cwd=project_dir,
- capture_output=True,
- text=True,
- )
- if base_result.returncode == 0:
- result["base_branch"] = base_result.stdout.strip()
+ # Use provided base_branch, or detect from current HEAD
+ if not base_branch:
+ base_result = subprocess.run(
+ ["git", "rev-parse", "--abbrev-ref", "HEAD"],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ )
+ if base_result.returncode == 0:
+ result["base_branch"] = base_result.stdout.strip()
+ else:
+ result["base_branch"] = base_branch
+ debug(MODULE, f"Using provided base branch: {base_branch}")
# Get the merge base commit
merge_base_result = subprocess.run(
@@ -553,7 +744,6 @@ def handle_merge_preview_command(
spec_name=spec_name,
)
- from merge import MergeOrchestrator
from workspace import get_existing_build_worktree
worktree_path = get_existing_build_worktree(project_dir, spec_name)
@@ -580,16 +770,32 @@ def handle_merge_preview_command(
}
try:
- # First, check for git-level conflicts (diverged branches)
- git_conflicts = _check_git_merge_conflicts(project_dir, spec_name)
-
# Determine the task's source branch (where the task was created from)
- # Use provided base_branch (from task metadata), or fall back to detected default
+ # Priority:
+ # 1. Provided base_branch (from task metadata)
+ # 2. Detect from worktree's git history (find which branch it diverged from)
+ # 3. Fall back to default branch detection (main/master)
task_source_branch = base_branch
if not task_source_branch:
- # Auto-detect the default branch (main/master) that worktrees are typically created from
+ # Try to detect from worktree's git history
+ task_source_branch = _detect_worktree_base_branch(
+ project_dir, worktree_path, spec_name
+ )
+ if not task_source_branch:
+ # Fall back to auto-detecting main/master
task_source_branch = _detect_default_branch(project_dir)
+ debug(
+ MODULE,
+ f"Using task source branch: {task_source_branch}",
+ provided=base_branch is not None,
+ )
+
+ # Check for git-level conflicts (diverged branches) using the task's source branch
+ git_conflicts = _check_git_merge_conflicts(
+ project_dir, spec_name, base_branch=task_source_branch
+ )
+
# Get actual changed files from git diff (this is the authoritative count)
all_changed_files = _get_changed_files_from_git(
worktree_path, task_source_branch
@@ -600,49 +806,39 @@ def handle_merge_preview_command(
changed_files=all_changed_files[:10], # Log first 10
)
- debug(MODULE, "Initializing MergeOrchestrator for preview...")
+ # OPTIMIZATION: Skip expensive refresh_from_git() and preview_merge() calls
+ # For merge-preview, we only need to detect:
+ # 1. Git conflicts (task vs base branch) - already calculated in _check_git_merge_conflicts()
+ # 2. Parallel task conflicts (this task vs other active tasks)
+ #
+ # For parallel task detection, we just check if this task's files overlap
+ # with files OTHER tasks have already recorded - no need to re-process all files.
- # Initialize the orchestrator
- orchestrator = MergeOrchestrator(
- project_dir,
- enable_ai=False, # Don't use AI for preview
- dry_run=True, # Don't write anything
- )
+ debug(MODULE, "Checking for parallel task conflicts (lightweight)...")
- # Refresh evolution data from the worktree
- # Compare against the task's source branch (where the task was created from)
+ # Check for parallel task conflicts by looking at existing evolution data
+ parallel_conflicts = _detect_parallel_task_conflicts(
+ project_dir, spec_name, all_changed_files
+ )
debug(
MODULE,
- f"Refreshing evolution data from worktree: {worktree_path}",
- task_source_branch=task_source_branch,
+ f"Parallel task conflicts detected: {len(parallel_conflicts)}",
+ conflicts=parallel_conflicts[:5] if parallel_conflicts else [],
)
- orchestrator.evolution_tracker.refresh_from_git(
- spec_name, worktree_path, target_branch=task_source_branch
- )
-
- # Get merge preview (semantic conflicts between parallel tasks)
- debug(MODULE, "Generating merge preview...")
- preview = orchestrator.preview_merge([spec_name])
- # Transform semantic conflicts to UI-friendly format
+ # Build conflict list - start with parallel task conflicts
conflicts = []
- for c in preview.get("conflicts", []):
- debug_verbose(
- MODULE,
- "Processing semantic conflict",
- file=c.get("file", ""),
- severity=c.get("severity", "unknown"),
- )
+ for pc in parallel_conflicts:
conflicts.append(
{
- "file": c.get("file", ""),
- "location": c.get("location", ""),
- "tasks": c.get("tasks", []),
- "severity": c.get("severity", "unknown"),
- "canAutoMerge": c.get("can_auto_merge", False),
- "strategy": c.get("strategy"),
- "reason": c.get("reason", ""),
- "type": "semantic",
+ "file": pc["file"],
+ "location": "file-level",
+ "tasks": pc["tasks"],
+ "severity": "medium",
+ "canAutoMerge": False,
+ "strategy": None,
+ "reason": f"File modified by multiple active tasks: {', '.join(pc['tasks'])}",
+ "type": "parallel",
}
)
@@ -669,13 +865,14 @@ def handle_merge_preview_command(
}
)
- summary = preview.get("summary", {})
# Count only non-lock-file conflicts
git_conflict_count = len(git_conflicts.get("conflicting_files", [])) - len(
lock_files_excluded
)
- total_conflicts = summary.get("total_conflicts", 0) + git_conflict_count
- conflict_files = summary.get("conflict_files", 0) + git_conflict_count
+ # Calculate totals from our conflict lists (git conflicts + parallel conflicts)
+ parallel_conflict_count = len(parallel_conflicts)
+ total_conflicts = git_conflict_count + parallel_conflict_count
+ conflict_files = git_conflict_count + parallel_conflict_count
# Filter lock files from the git conflicts list for the response
non_lock_conflicting_files = [
@@ -761,7 +958,7 @@ def handle_merge_preview_command(
"totalFiles": total_files_from_git,
"conflictFiles": conflict_files,
"totalConflicts": total_conflicts,
- "autoMergeable": summary.get("auto_mergeable", 0),
+ "autoMergeable": 0, # Not tracking auto-merge in lightweight mode
"hasGitConflicts": git_conflicts["has_conflicts"]
and len(non_lock_conflicting_files) > 0,
# Include path-mapped AI merge count for UI display
@@ -776,10 +973,9 @@ def handle_merge_preview_command(
"Merge preview complete",
total_files=result["summary"]["totalFiles"],
total_files_source="git_diff",
- semantic_tracked_files=summary.get("total_files", 0),
total_conflicts=result["summary"]["totalConflicts"],
has_git_conflicts=git_conflicts["has_conflicts"],
- auto_mergeable=result["summary"]["autoMergeable"],
+ parallel_conflicts=parallel_conflict_count,
path_mapped_ai_merges=len(path_mapped_ai_merges),
total_renames=len(path_mappings),
)
@@ -805,3 +1001,220 @@ def handle_merge_preview_command(
"pathMappedAIMergeCount": 0,
},
}
+
+
+def handle_create_pr_command(
+ project_dir: Path,
+ spec_name: str,
+ target_branch: str | None = None,
+ title: str | None = None,
+ draft: bool = False,
+) -> CreatePRResult:
+ """
+ Handle the --create-pr command: push branch and create a GitHub PR.
+
+ Args:
+ project_dir: Path to the project directory
+ spec_name: Name of the spec (e.g., "001-feature-name")
+ target_branch: Target branch for PR (defaults to base branch)
+ title: Custom PR title (defaults to spec name)
+ draft: Whether to create as draft PR
+
+ Returns:
+ CreatePRResult with success status, pr_url, and any errors
+ """
+ from core.worktree import WorktreeManager
+
+ print_banner()
+ print("\n" + "=" * 70)
+ print(" CREATE PULL REQUEST")
+ print("=" * 70)
+
+ # Check if worktree exists
+ worktree_path = get_existing_build_worktree(project_dir, spec_name)
+ if not worktree_path:
+ print(f"\n{icon(Icons.ERROR)} No build found for spec: {spec_name}")
+ print("\nA completed build worktree is required to create a PR.")
+ print("Run your build first, then use --create-pr.")
+ error_result: CreatePRResult = {
+ "success": False,
+ "error": "No build found for this spec",
+ }
+ return error_result
+
+ # Create worktree manager
+ manager = WorktreeManager(project_dir, base_branch=target_branch)
+
+ print(f"\n{icon(Icons.BRANCH)} Pushing branch and creating PR...")
+ print(f" Spec: {spec_name}")
+ print(f" Target: {target_branch or manager.base_branch}")
+ if title:
+ print(f" Title: {title}")
+ if draft:
+ print(" Mode: Draft PR")
+
+ # Push and create PR with exception handling for clean JSON output
+ try:
+ raw_result = manager.push_and_create_pr(
+ spec_name=spec_name,
+ target_branch=target_branch,
+ title=title,
+ draft=draft,
+ )
+ except Exception as e:
+ debug_error(MODULE, f"Exception during PR creation: {e}")
+ error_result: CreatePRResult = {
+ "success": False,
+ "error": str(e),
+ "message": "Failed to create PR",
+ }
+ print(f"\n{icon(Icons.ERROR)} Failed to create PR: {e}")
+ print(json.dumps(error_result))
+ return error_result
+
+ # Convert PushAndCreatePRResult to CreatePRResult
+ result: CreatePRResult = {
+ "success": raw_result.get("success", False),
+ "pr_url": raw_result.get("pr_url"),
+ "already_exists": raw_result.get("already_exists", False),
+ "error": raw_result.get("error"),
+ "message": raw_result.get("message"),
+ "pushed": raw_result.get("pushed", False),
+ "remote": raw_result.get("remote", ""),
+ "branch": raw_result.get("branch", ""),
+ }
+
+ if result.get("success"):
+ pr_url = result.get("pr_url")
+ already_exists = result.get("already_exists", False)
+
+ if already_exists:
+ print(f"\n{icon(Icons.SUCCESS)} PR already exists!")
+ else:
+ print(f"\n{icon(Icons.SUCCESS)} PR created successfully!")
+
+ if pr_url:
+ print(f"\n{icon(Icons.LINK)} {pr_url}")
+ else:
+ print(f"\n{icon(Icons.INFO)} Check GitHub for the PR URL")
+
+ print("\nNext steps:")
+ print(" 1. Review the PR on GitHub")
+ print(" 2. Request reviews from your team")
+ print(" 3. Merge when approved")
+
+ # Output JSON for frontend parsing
+ print(json.dumps(result))
+ return result
+ else:
+ error = result.get("error", "Unknown error")
+ print(f"\n{icon(Icons.ERROR)} Failed to create PR: {error}")
+ # Output JSON for frontend parsing
+ print(json.dumps(result))
+ return result
+
+
+def cleanup_old_worktrees_command(
+ project_dir: Path, days: int = 30, dry_run: bool = False
+) -> dict:
+ """
+ Clean up old worktrees that haven't been modified in the specified number of days.
+
+ Args:
+ project_dir: Project root directory
+ days: Number of days threshold (default: 30)
+ dry_run: If True, only show what would be removed (default: False)
+
+ Returns:
+ Dictionary with cleanup results
+ """
+ try:
+ manager = WorktreeManager(project_dir)
+
+ removed, failed = manager.cleanup_old_worktrees(
+ days_threshold=days, dry_run=dry_run
+ )
+
+ return {
+ "success": True,
+ "removed": removed,
+ "failed": failed,
+ "dry_run": dry_run,
+ "days_threshold": days,
+ }
+
+ except Exception as e:
+ return {
+ "success": False,
+ "error": str(e),
+ "removed": [],
+ "failed": [],
+ }
+
+
+def worktree_summary_command(project_dir: Path) -> dict:
+ """
+ Get a summary of all worktrees with age information.
+
+ Args:
+ project_dir: Project root directory
+
+ Returns:
+ Dictionary with worktree summary data
+ """
+ try:
+ manager = WorktreeManager(project_dir)
+
+ # Print to console for CLI usage
+ manager.print_worktree_summary()
+
+ # Also return data for programmatic access
+ worktrees = manager.list_all_worktrees()
+ warning = manager.get_worktree_count_warning()
+
+ # Categorize by age
+ recent = []
+ week_old = []
+ month_old = []
+ very_old = []
+ unknown_age = []
+
+ for info in worktrees:
+ data = {
+ "spec_name": info.spec_name,
+ "days_since_last_commit": info.days_since_last_commit,
+ "commit_count": info.commit_count,
+ }
+
+ if info.days_since_last_commit is None:
+ unknown_age.append(data)
+ elif info.days_since_last_commit < 7:
+ recent.append(data)
+ elif info.days_since_last_commit < 30:
+ week_old.append(data)
+ elif info.days_since_last_commit < 90:
+ month_old.append(data)
+ else:
+ very_old.append(data)
+
+ return {
+ "success": True,
+ "total_worktrees": len(worktrees),
+ "categories": {
+ "recent": recent,
+ "week_old": week_old,
+ "month_old": month_old,
+ "very_old": very_old,
+ "unknown_age": unknown_age,
+ },
+ "warning": warning,
+ }
+
+ except Exception as e:
+ return {
+ "success": False,
+ "error": str(e),
+ "total_worktrees": 0,
+ "categories": {},
+ "warning": None,
+ }
diff --git a/apps/backend/commit_message.py b/apps/backend/commit_message.py
index 0518f20fba..b90242590c 100644
--- a/apps/backend/commit_message.py
+++ b/apps/backend/commit_message.py
@@ -231,7 +231,9 @@ async def _call_claude(prompt: str) -> str:
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
response_text += block.text
logger.info(f"Generated commit message: {len(response_text)} chars")
diff --git a/apps/backend/core/agent.py b/apps/backend/core/agent.py
index 8b2cc8d540..6d9ffe3702 100644
--- a/apps/backend/core/agent.py
+++ b/apps/backend/core/agent.py
@@ -39,7 +39,7 @@
run_followup_planner,
save_session_memory,
save_session_to_graphiti,
- sync_plan_to_source,
+ sync_spec_to_source,
)
# Ensure all exports are available at module level
@@ -57,7 +57,7 @@
"load_implementation_plan",
"find_subtask_in_plan",
"find_phase_for_subtask",
- "sync_plan_to_source",
+ "sync_spec_to_source",
"AUTO_CONTINUE_DELAY_SECONDS",
"HUMAN_INTERVENTION_FILE",
]
diff --git a/apps/backend/core/auth.py b/apps/backend/core/auth.py
index be105e1ff9..3e0ccac3a5 100644
--- a/apps/backend/core/auth.py
+++ b/apps/backend/core/auth.py
@@ -23,12 +23,27 @@
# Environment variables to pass through to SDK subprocess
# NOTE: ANTHROPIC_API_KEY is intentionally excluded to prevent silent API billing
SDK_ENV_VARS = [
+ # API endpoint configuration
"ANTHROPIC_BASE_URL",
"ANTHROPIC_AUTH_TOKEN",
+ # Microsoft Foundry (Azure AI Foundry) configuration
+ # See: https://code.claude.com/docs/en/microsoft-foundry
+ "CLAUDE_CODE_USE_FOUNDRY", # Set to "1" to enable Microsoft Foundry
+ "ANTHROPIC_FOUNDRY_API_KEY", # API key (optional if using Entra ID auth)
+ "ANTHROPIC_FOUNDRY_BASE_URL", # Full endpoint: https://{resource}.services.ai.azure.com
+ "ANTHROPIC_FOUNDRY_RESOURCE", # Azure resource name (alternative to BASE_URL)
+ # Model overrides (used for both standard API and Microsoft Foundry deployments)
+ "ANTHROPIC_MODEL",
+ "ANTHROPIC_DEFAULT_HAIKU_MODEL", # e.g., "claude-haiku-4-5"
+ "ANTHROPIC_DEFAULT_SONNET_MODEL", # e.g., "claude-sonnet-4-5"
+ "ANTHROPIC_DEFAULT_OPUS_MODEL", # e.g., "claude-opus-4-1"
+ # SDK behavior configuration
"NO_PROXY",
"DISABLE_TELEMETRY",
"DISABLE_COST_WARNINGS",
"API_TIMEOUT_MS",
+ # Windows-specific: Git Bash path for Claude Code CLI
+ "CLAUDE_CODE_GIT_BASH_PATH",
]
@@ -127,9 +142,10 @@ def get_auth_token() -> str | None:
Get authentication token from environment variables or system credential store.
Checks multiple sources in priority order:
- 1. CLAUDE_CODE_OAUTH_TOKEN (env var)
- 2. ANTHROPIC_AUTH_TOKEN (CCR/proxy env var for enterprise setups)
- 3. System credential store (macOS Keychain, Windows Credential Manager)
+ 1. CLAUDE_CODE_USE_FOUNDRY with ANTHROPIC_FOUNDRY_API_KEY (Azure AI Foundry)
+ 2. CLAUDE_CODE_OAUTH_TOKEN (env var)
+ 3. ANTHROPIC_AUTH_TOKEN (CCR/proxy env var for enterprise setups)
+ 4. System credential store (macOS Keychain, Windows Credential Manager)
NOTE: ANTHROPIC_API_KEY is intentionally NOT supported to prevent
silent billing to user's API credits when OAuth is misconfigured.
@@ -137,7 +153,16 @@ def get_auth_token() -> str | None:
Returns:
Token string if found, None otherwise
"""
- # First check environment variables
+ # Check for Microsoft Foundry mode first
+ if os.environ.get("CLAUDE_CODE_USE_FOUNDRY") == "1":
+ foundry_key = os.environ.get("ANTHROPIC_FOUNDRY_API_KEY")
+ if foundry_key:
+ return foundry_key
+ # Foundry mode without API key = Entra ID auth (handled by SDK)
+ # Return a placeholder to indicate auth is configured
+ return "foundry-entra-id"
+
+ # Check environment variables
for var in AUTH_TOKEN_ENV_VARS:
token = os.environ.get(var)
if token:
@@ -208,6 +233,85 @@ def require_auth_token() -> str:
return token
+def _find_git_bash_path() -> str | None:
+ """
+ Find git-bash (bash.exe) path on Windows.
+
+ Uses 'where git' to find git.exe, then derives bash.exe location from it.
+ Git for Windows installs bash.exe in the 'bin' directory alongside git.exe
+ or in the parent 'bin' directory when git.exe is in 'cmd'.
+
+ Returns:
+ Full path to bash.exe if found, None otherwise
+ """
+ if platform.system() != "Windows":
+ return None
+
+ # If already set in environment, use that
+ existing = os.environ.get("CLAUDE_CODE_GIT_BASH_PATH")
+ if existing and os.path.exists(existing):
+ return existing
+
+ git_path = None
+
+ # Method 1: Use 'where' command to find git.exe
+ try:
+ # Use where.exe explicitly for reliability
+ result = subprocess.run(
+ ["where.exe", "git"],
+ capture_output=True,
+ text=True,
+ timeout=5,
+ shell=False,
+ )
+
+ if result.returncode == 0 and result.stdout.strip():
+ git_paths = result.stdout.strip().splitlines()
+ if git_paths:
+ git_path = git_paths[0].strip()
+ except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
+ # Intentionally suppress errors - best-effort detection with fallback to common paths
+ pass
+
+ # Method 2: Check common installation paths if 'where' didn't work
+ if not git_path:
+ common_git_paths = [
+ os.path.expandvars(r"%PROGRAMFILES%\Git\cmd\git.exe"),
+ os.path.expandvars(r"%PROGRAMFILES%\Git\bin\git.exe"),
+ os.path.expandvars(r"%PROGRAMFILES(X86)%\Git\cmd\git.exe"),
+ os.path.expandvars(r"%LOCALAPPDATA%\Programs\Git\cmd\git.exe"),
+ ]
+ for path in common_git_paths:
+ if os.path.exists(path):
+ git_path = path
+ break
+
+ if not git_path:
+ return None
+
+ # Derive bash.exe location from git.exe location
+ # Git for Windows structure:
+ # C:\...\Git\cmd\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe
+ # C:\...\Git\bin\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe
+ # C:\...\Git\mingw64\bin\git.exe -> bash.exe is at C:\...\Git\bin\bash.exe
+ git_dir = os.path.dirname(git_path)
+ git_parent = os.path.dirname(git_dir)
+ git_grandparent = os.path.dirname(git_parent)
+
+ # Check common bash.exe locations relative to git installation
+ possible_bash_paths = [
+ os.path.join(git_parent, "bin", "bash.exe"), # cmd -> bin
+ os.path.join(git_dir, "bash.exe"), # If git.exe is in bin
+ os.path.join(git_grandparent, "bin", "bash.exe"), # mingw64/bin -> bin
+ ]
+
+ for bash_path in possible_bash_paths:
+ if os.path.exists(bash_path):
+ return bash_path
+
+ return None
+
+
def get_sdk_env_vars() -> dict[str, str]:
"""
Get environment variables to pass to SDK.
@@ -215,6 +319,8 @@ def get_sdk_env_vars() -> dict[str, str]:
Collects relevant env vars (ANTHROPIC_BASE_URL, etc.) that should
be passed through to the claude-agent-sdk subprocess.
+ On Windows, auto-detects CLAUDE_CODE_GIT_BASH_PATH if not already set.
+
Returns:
Dict of env var name -> value for non-empty vars
"""
@@ -223,6 +329,14 @@ def get_sdk_env_vars() -> dict[str, str]:
value = os.environ.get(var)
if value:
env[var] = value
+
+ # On Windows, auto-detect git-bash path if not already set
+ # Claude Code CLI requires bash.exe to run on Windows
+ if platform.system() == "Windows" and "CLAUDE_CODE_GIT_BASH_PATH" not in env:
+ bash_path = _find_git_bash_path()
+ if bash_path:
+ env["CLAUDE_CODE_GIT_BASH_PATH"] = bash_path
+
return env
diff --git a/apps/backend/core/client.py b/apps/backend/core/client.py
index 3d8dbe8de6..69c9c0e239 100644
--- a/apps/backend/core/client.py
+++ b/apps/backend/core/client.py
@@ -16,6 +16,7 @@
import json
import logging
import os
+import platform
import threading
import time
from pathlib import Path
@@ -488,6 +489,12 @@ def create_client(
# Collect env vars to pass to SDK (ANTHROPIC_BASE_URL, etc.)
sdk_env = get_sdk_env_vars()
+ # Debug: Log git-bash path detection on Windows
+ if "CLAUDE_CODE_GIT_BASH_PATH" in sdk_env:
+ logger.info(f"Git Bash path found: {sdk_env['CLAUDE_CODE_GIT_BASH_PATH']}")
+ elif platform.system() == "Windows":
+ logger.warning("Git Bash path not detected on Windows!")
+
# Check if Linear integration is enabled
linear_enabled = is_linear_enabled()
linear_api_key = os.environ.get("LINEAR_API_KEY", "")
@@ -538,6 +545,48 @@ def create_client(
# cases where Claude uses absolute paths for file operations
project_path_str = str(project_dir.resolve())
spec_path_str = str(spec_dir.resolve())
+
+ # Detect if we're running in a worktree and get the original project directory
+ # Worktrees are located in either:
+ # - .auto-claude/worktrees/tasks/{spec-name}/ (new location)
+ # - .worktrees/{spec-name}/ (legacy location)
+ # When running in a worktree, we need to allow access to both the worktree
+ # and the original project's .auto-claude/ directory for spec files
+ original_project_permissions = []
+ resolved_project_path = project_dir.resolve()
+
+ # Check for worktree paths and extract original project directory
+ # This handles spec worktrees, PR review worktrees, and legacy worktrees
+ # Note: Windows paths are normalized to forward slashes before comparison
+ worktree_markers = [
+ "/.auto-claude/worktrees/tasks/", # Spec/task worktrees
+ "/.auto-claude/github/pr/worktrees/", # PR review worktrees
+ "/.worktrees/", # Legacy worktree location
+ ]
+ project_path_posix = str(resolved_project_path).replace("\\", "/")
+
+ for marker in worktree_markers:
+ if marker in project_path_posix:
+ # Extract the original project directory (parent of worktree location)
+ # Use rsplit to get the rightmost occurrence (handles nested projects)
+ original_project_str = project_path_posix.rsplit(marker, 1)[0]
+ original_project_dir = Path(original_project_str)
+
+ # Grant permissions for relevant directories in the original project
+ permission_ops = ["Read", "Write", "Edit", "Glob", "Grep"]
+ dirs_to_permit = [
+ original_project_dir / ".auto-claude",
+ original_project_dir / ".worktrees", # Legacy support
+ ]
+
+ for dir_path in dirs_to_permit:
+ if dir_path.exists():
+ path_str = str(dir_path.resolve())
+ original_project_permissions.extend(
+ [f"{op}({path_str}/**)" for op in permission_ops]
+ )
+ break
+
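
A standalone sketch of the marker-based extraction above, using a made-up path; it mirrors the normalize-then-rsplit logic but is not part of the patch.

    from pathlib import Path

    # Hypothetical worktree path; Windows separators are normalized first, as above.
    resolved = r"C:\repo\.auto-claude\worktrees\tasks\my-spec"
    posix = resolved.replace("\\", "/")

    marker = "/.auto-claude/worktrees/tasks/"
    if marker in posix:
        original_project = Path(posix.rsplit(marker, 1)[0])
        # original_project == Path("C:/repo"); permissions are then granted for
        # C:/repo/.auto-claude and C:/repo/.worktrees when those directories exist.
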
security_settings = {
"sandbox": {"enabled": True, "autoAllowBashIfSandboxed": True},
"permissions": {
@@ -560,6 +609,9 @@ def create_client(
f"Read({spec_path_str}/**)",
f"Write({spec_path_str}/**)",
f"Edit({spec_path_str}/**)",
+ # Allow original project's .auto-claude/ and .worktrees/ directories
+ # when running in a worktree (fixes issue #385 - permission errors)
+ *original_project_permissions,
# Bash permission granted here, but actual commands are validated
# by the bash_security_hook (see security.py for allowed commands)
"Bash(*)",
@@ -596,6 +648,8 @@ def create_client(
print(f"Security settings: {settings_file}")
print(" - Sandbox enabled (OS-level bash isolation)")
print(f" - Filesystem restricted to: {project_dir.resolve()}")
+ if original_project_permissions:
+ print(" - Worktree permissions: granted for original project directories")
print(" - Bash commands restricted to allowlist")
if max_thinking_tokens:
print(f" - Extended thinking: {max_thinking_tokens:,} tokens")
@@ -742,6 +796,12 @@ def create_client(
"settings": str(settings_file.resolve()),
"env": sdk_env, # Pass ANTHROPIC_BASE_URL etc. to subprocess
"max_thinking_tokens": max_thinking_tokens, # Extended thinking budget
+ "max_buffer_size": 10
+ * 1024
+ * 1024, # 10MB buffer (default: 1MB) - fixes large tool results
+ # Enable file checkpointing to track file read/write state across tool calls
+ # This prevents "File has not been read yet" errors in recovery sessions
+ "enable_file_checkpointing": True,
}
# Add structured output format if specified
diff --git a/apps/backend/core/dependency_validator.py b/apps/backend/core/dependency_validator.py
new file mode 100644
index 0000000000..8517cb3631
--- /dev/null
+++ b/apps/backend/core/dependency_validator.py
@@ -0,0 +1,50 @@
+"""
+Dependency Validator
+====================
+
+Validates platform-specific dependencies are installed before running agents.
+"""
+
+import sys
+from pathlib import Path
+
+
+def validate_platform_dependencies() -> None:
+ """
+ Validate that platform-specific dependencies are installed.
+
+ Raises:
+ SystemExit: If required platform-specific dependencies are missing,
+ with helpful installation instructions.
+ """
+ # Check Windows-specific dependencies
+ if sys.platform == "win32" and sys.version_info >= (3, 12):
+ try:
+ import pywintypes # noqa: F401
+ except ImportError:
+ _exit_with_pywin32_error()
+
+
+def _exit_with_pywin32_error() -> None:
+ """Exit with helpful error message for missing pywin32."""
+ # Use sys.prefix to detect the virtual environment path
+ # This works for venv and poetry environments
+ venv_activate = Path(sys.prefix) / "Scripts" / "activate"
+
+ sys.exit(
+ "Error: Required Windows dependency 'pywin32' is not installed.\n"
+ "\n"
+ "Auto Claude requires pywin32 on Windows for LadybugDB/Graphiti memory integration.\n"
+ "\n"
+ "To fix this:\n"
+ "1. Activate your virtual environment:\n"
+ f" {venv_activate}\n"
+ "\n"
+ "2. Install pywin32:\n"
+ " pip install pywin32>=306\n"
+ "\n"
+ " Or reinstall all dependencies:\n"
+ " pip install -r requirements.txt\n"
+ "\n"
+ f"Current Python: {sys.executable}\n"
+ )
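
For context, a hedged sketch (the wrapper below is hypothetical) of how a caller might invoke this validator early during startup:

    from core.dependency_validator import validate_platform_dependencies

    def main() -> None:
        # Fail fast with installation instructions before any agent work starts;
        # on non-Windows platforms or Python < 3.12 this is a no-op.
        validate_platform_dependencies()
        ...  # rest of startup
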
diff --git a/apps/backend/core/git_executable.py b/apps/backend/core/git_executable.py
new file mode 100644
index 0000000000..d17a3e07ef
--- /dev/null
+++ b/apps/backend/core/git_executable.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+"""
+Git Executable Finder
+======================
+
+Utility to find the git executable, with Windows-specific fallbacks.
+Separated into its own module to avoid circular imports.
+"""
+
+import os
+import shutil
+import subprocess
+from pathlib import Path
+
+_cached_git_path: str | None = None
+
+
+def get_git_executable() -> str:
+ """Find the git executable, with Windows-specific fallbacks.
+
+ Returns the path to git executable. On Windows, checks multiple sources:
+ 1. CLAUDE_CODE_GIT_BASH_PATH env var (set by Electron frontend)
+ 2. shutil.which (if git is in PATH)
+ 3. Common installation locations
+ 4. Windows 'where' command
+
+ Caches the result after first successful find.
+ """
+ global _cached_git_path
+
+ # Return cached result if available
+ if _cached_git_path is not None:
+ return _cached_git_path
+
+ git_path = _find_git_executable()
+ _cached_git_path = git_path
+ return git_path
+
+
+def _find_git_executable() -> str:
+ """Internal function to find git executable."""
+ # 1. Check CLAUDE_CODE_GIT_BASH_PATH (set by Electron frontend)
+ # This env var points to bash.exe, we can derive git.exe from it
+ bash_path = os.environ.get("CLAUDE_CODE_GIT_BASH_PATH")
+ if bash_path:
+ try:
+ bash_path_obj = Path(bash_path)
+ if bash_path_obj.exists():
+ git_dir = bash_path_obj.parent.parent
+ # Try cmd/git.exe first (preferred), then bin/git.exe
+ for git_subpath in ["cmd/git.exe", "bin/git.exe"]:
+ git_path = git_dir / git_subpath
+ if git_path.is_file():
+ return str(git_path)
+ except (OSError, ValueError):
+ pass
+
+ # 2. Try shutil.which (works if git is in PATH)
+ git_path = shutil.which("git")
+ if git_path:
+ return git_path
+
+ # 3. Windows-specific: check common installation locations
+ if os.name == "nt":
+ common_paths = [
+ os.path.expandvars(r"%PROGRAMFILES%\Git\cmd\git.exe"),
+ os.path.expandvars(r"%PROGRAMFILES%\Git\bin\git.exe"),
+ os.path.expandvars(r"%PROGRAMFILES(X86)%\Git\cmd\git.exe"),
+ os.path.expandvars(r"%LOCALAPPDATA%\Programs\Git\cmd\git.exe"),
+ r"C:\Program Files\Git\cmd\git.exe",
+ r"C:\Program Files (x86)\Git\cmd\git.exe",
+ ]
+ for path in common_paths:
+ try:
+ if os.path.isfile(path):
+ return path
+ except OSError:
+ continue
+
+ # 4. Try 'where' command with shell=True (more reliable on Windows)
+ try:
+ result = subprocess.run(
+ "where git",
+ capture_output=True,
+ text=True,
+ timeout=5,
+ shell=True,
+ )
+ if result.returncode == 0 and result.stdout.strip():
+ found_path = result.stdout.strip().split("\n")[0].strip()
+ if found_path and os.path.isfile(found_path):
+ return found_path
+ except (subprocess.TimeoutExpired, OSError):
+ pass
+
+ # Default fallback - let subprocess handle it (may fail)
+ return "git"
+
+
+def run_git(
+ args: list[str],
+ cwd: Path | str | None = None,
+ timeout: int = 60,
+ input_data: str | None = None,
+) -> subprocess.CompletedProcess:
+ """Run a git command with proper executable finding.
+
+ Args:
+ args: Git command arguments (without 'git' prefix)
+ cwd: Working directory for the command
+ timeout: Command timeout in seconds (default: 60)
+ input_data: Optional string data to pass to stdin
+
+ Returns:
+ CompletedProcess with command results.
+ """
+ git = get_git_executable()
+ try:
+ return subprocess.run(
+ [git] + args,
+ cwd=cwd,
+ input=input_data,
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ timeout=timeout,
+ )
+ except subprocess.TimeoutExpired:
+ return subprocess.CompletedProcess(
+ args=[git] + args,
+ returncode=-1,
+ stdout="",
+ stderr=f"Command timed out after {timeout} seconds",
+ )
+ except FileNotFoundError:
+ return subprocess.CompletedProcess(
+ args=[git] + args,
+ returncode=-1,
+ stdout="",
+ stderr="Git executable not found. Please ensure git is installed and in PATH.",
+ )
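
A usage sketch, not part of the patch, showing how callers consume run_git(); the repository path is made up.

    from pathlib import Path

    from core.git_executable import run_git

    repo = Path("/tmp/example-repo")  # hypothetical path

    # run_git() prepends the resolved git executable, captures text output, and
    # returns a CompletedProcess even on timeout or when git is missing.
    result = run_git(["status", "--porcelain"], cwd=repo, timeout=10)
    if result.returncode == 0:
        dirty = bool(result.stdout.strip())
    else:
        print(f"git failed: {result.stderr}")
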
diff --git a/apps/backend/core/phase_event.py b/apps/backend/core/phase_event.py
index a86321cf02..acc034605b 100644
--- a/apps/backend/core/phase_event.py
+++ b/apps/backend/core/phase_event.py
@@ -52,4 +52,8 @@ def emit_phase(
print(f"{PHASE_MARKER_PREFIX}{json.dumps(payload, default=str)}", flush=True)
except (OSError, UnicodeEncodeError) as e:
if _DEBUG:
- print(f"[phase_event] emit failed: {e}", file=sys.stderr, flush=True)
+ try:
+ sys.stderr.write(f"[phase_event] emit failed: {e}\n")
+ sys.stderr.flush()
+ except (OSError, UnicodeEncodeError):
+ pass # Truly silent on complete I/O failure
diff --git a/apps/backend/core/workspace.py b/apps/backend/core/workspace.py
index ddfd49059b..6ae292ab6b 100644
--- a/apps/backend/core/workspace.py
+++ b/apps/backend/core/workspace.py
@@ -4,7 +4,7 @@
=============================================
Handles workspace isolation through Git worktrees, where each spec
-gets its own isolated worktree in .worktrees/{spec-name}/.
+gets its own isolated worktree in .auto-claude/worktrees/tasks/{spec-name}/.
This module has been refactored for better maintainability:
- Models and enums: workspace/models.py
@@ -90,12 +90,18 @@ def is_debug_enabled():
from core.workspace.git_utils import (
detect_file_renames as _detect_file_renames,
)
+from core.workspace.git_utils import (
+ get_binary_file_content_from_ref as _get_binary_file_content_from_ref,
+)
from core.workspace.git_utils import (
get_changed_files_from_branch as _get_changed_files_from_branch,
)
from core.workspace.git_utils import (
get_file_content_from_ref as _get_file_content_from_ref,
)
+from core.workspace.git_utils import (
+ is_binary_file as _is_binary_file,
+)
from core.workspace.git_utils import (
is_lock_file as _is_lock_file,
)
@@ -239,14 +245,16 @@ def merge_existing_build(
if smart_result is not None:
# Smart merge handled it (success or identified conflicts)
if smart_result.get("success"):
- # Check if smart merge resolved git conflicts or path-mapped files
+ # Check if smart merge actually DID work (resolved conflicts via AI)
+ # NOTE: "files_merged" in stats is misleading - it's "files TO merge" not "files WERE merged"
+ # The smart merge preview returns this count but doesn't actually perform the merge
+ # in the no-conflict path. We only skip git merge if AI actually did work.
stats = smart_result.get("stats", {})
had_conflicts = stats.get("conflicts_resolved", 0) > 0
- files_merged = stats.get("files_merged", 0) > 0
ai_assisted = stats.get("ai_assisted", 0) > 0
- if had_conflicts or files_merged or ai_assisted:
- # Git conflicts were resolved OR path-mapped files were AI merged
+ if had_conflicts or ai_assisted:
+ # AI actually resolved conflicts or assisted with merges
# Changes are already written and staged - no need for git merge
_print_merge_success(
no_commit, stats, spec_name=spec_name, keep_worktree=True
@@ -258,7 +266,8 @@ def merge_existing_build(
return True
else:
- # No conflicts and no files merged - do standard git merge
+ # No conflicts needed AI resolution - do standard git merge
+ # This is the common case: no divergence, just need to merge changes
success_result = manager.merge_worktree(
spec_name, delete_after=False, no_commit=no_commit
)
@@ -773,28 +782,44 @@ def _resolve_git_conflicts_with_ai(
print(muted(f" Copying {len(new_files)} new file(s) first (dependencies)..."))
for file_path, status in new_files:
try:
- content = _get_file_content_from_ref(
- project_dir, spec_branch, file_path
- )
- if content is not None:
- # Apply path mapping - write to new location if file was renamed
- target_file_path = _apply_path_mapping(file_path, path_mappings)
- target_path = project_dir / target_file_path
- target_path.parent.mkdir(parents=True, exist_ok=True)
- target_path.write_text(content, encoding="utf-8")
- subprocess.run(
- ["git", "add", target_file_path],
- cwd=project_dir,
- capture_output=True,
+ # Apply path mapping - write to new location if file was renamed
+ target_file_path = _apply_path_mapping(file_path, path_mappings)
+ target_path = project_dir / target_file_path
+ target_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Handle binary files differently - use bytes instead of text
+ if _is_binary_file(file_path):
+ binary_content = _get_binary_file_content_from_ref(
+ project_dir, spec_branch, file_path
+ )
+ if binary_content is not None:
+ target_path.write_bytes(binary_content)
+ subprocess.run(
+ ["git", "add", target_file_path],
+ cwd=project_dir,
+ capture_output=True,
+ )
+ resolved_files.append(target_file_path)
+ debug(MODULE, f"Copied new binary file: {file_path}")
+ else:
+ content = _get_file_content_from_ref(
+ project_dir, spec_branch, file_path
)
- resolved_files.append(target_file_path)
- if target_file_path != file_path:
- debug(
- MODULE,
- f"Copied new file with path mapping: {file_path} -> {target_file_path}",
+ if content is not None:
+ target_path.write_text(content, encoding="utf-8")
+ subprocess.run(
+ ["git", "add", target_file_path],
+ cwd=project_dir,
+ capture_output=True,
)
- else:
- debug(MODULE, f"Copied new file: {file_path}")
+ resolved_files.append(target_file_path)
+ if target_file_path != file_path:
+ debug(
+ MODULE,
+ f"Copied new file with path mapping: {file_path} -> {target_file_path}",
+ )
+ else:
+ debug(MODULE, f"Copied new file: {file_path}")
except Exception as e:
debug_warning(MODULE, f"Could not copy new file {file_path}: {e}")
@@ -1118,24 +1143,44 @@ def _resolve_git_conflicts_with_ai(
)
else:
# Modified without path change - simple copy
- content = _get_file_content_from_ref(
- project_dir, spec_branch, file_path
- )
- if content is not None:
- target_path = project_dir / target_file_path
- target_path.parent.mkdir(parents=True, exist_ok=True)
- target_path.write_text(content, encoding="utf-8")
- subprocess.run(
- ["git", "add", target_file_path],
- cwd=project_dir,
- capture_output=True,
+ # Check if binary file to use correct read/write method
+ target_path = project_dir / target_file_path
+ target_path.parent.mkdir(parents=True, exist_ok=True)
+
+ if _is_binary_file(file_path):
+ binary_content = _get_binary_file_content_from_ref(
+ project_dir, spec_branch, file_path
+ )
+ if binary_content is not None:
+ target_path.write_bytes(binary_content)
+ subprocess.run(
+ ["git", "add", target_file_path],
+ cwd=project_dir,
+ capture_output=True,
+ )
+ resolved_files.append(target_file_path)
+ if target_file_path != file_path:
+ debug(
+ MODULE,
+ f"Merged binary with path mapping: {file_path} -> {target_file_path}",
+ )
+ else:
+ content = _get_file_content_from_ref(
+ project_dir, spec_branch, file_path
)
- resolved_files.append(target_file_path)
- if target_file_path != file_path:
- debug(
- MODULE,
- f"Merged with path mapping: {file_path} -> {target_file_path}",
+ if content is not None:
+ target_path.write_text(content, encoding="utf-8")
+ subprocess.run(
+ ["git", "add", target_file_path],
+ cwd=project_dir,
+ capture_output=True,
)
+ resolved_files.append(target_file_path)
+ if target_file_path != file_path:
+ debug(
+ MODULE,
+ f"Merged with path mapping: {file_path} -> {target_file_path}",
+ )
except Exception as e:
print(muted(f" Warning: Could not process {file_path}: {e}"))
@@ -1431,7 +1476,9 @@ async def _merge_file_with_ai_async(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
response_text += block.text
if response_text:
diff --git a/apps/backend/core/workspace/__init__.py b/apps/backend/core/workspace/__init__.py
index e5b5ac711a..db278769ea 100644
--- a/apps/backend/core/workspace/__init__.py
+++ b/apps/backend/core/workspace/__init__.py
@@ -4,7 +4,7 @@
=============================
Handles workspace isolation through Git worktrees, where each spec
-gets its own isolated worktree in .worktrees/{spec-name}/.
+gets its own isolated worktree in .auto-claude/worktrees/tasks/{spec-name}/.
This package provides:
- Workspace setup and configuration
@@ -62,6 +62,7 @@
MAX_SYNTAX_FIX_RETRIES,
MERGE_LOCK_TIMEOUT,
_create_conflict_file_with_git,
+ _get_binary_file_content_from_ref,
_get_changed_files_from_branch,
_get_file_content_from_ref,
_is_binary_file,
@@ -70,6 +71,7 @@
_is_process_running,
_validate_merged_syntax,
create_conflict_file_with_git,
+ get_binary_file_content_from_ref,
get_changed_files_from_branch,
get_current_branch,
get_existing_build_worktree,
@@ -117,6 +119,7 @@
"get_current_branch",
"get_existing_build_worktree",
"get_file_content_from_ref",
+ "get_binary_file_content_from_ref",
"get_changed_files_from_branch",
"is_process_running",
"is_binary_file",
diff --git a/apps/backend/core/workspace/finalization.py b/apps/backend/core/workspace/finalization.py
index 3078f2f8a2..a398391f84 100644
--- a/apps/backend/core/workspace/finalization.py
+++ b/apps/backend/core/workspace/finalization.py
@@ -169,7 +169,15 @@ def handle_workspace_choice(
if staging_path:
print(highlight(f" cd {staging_path}"))
else:
- print(highlight(f" cd {project_dir}/.worktrees/{spec_name}"))
+ worktree_path = get_existing_build_worktree(project_dir, spec_name)
+ if worktree_path:
+ print(highlight(f" cd {worktree_path}"))
+ else:
+ print(
+ highlight(
+ f" cd {project_dir}/.auto-claude/worktrees/tasks/{spec_name}"
+ )
+ )
# Show likely test/run commands
if staging_path:
@@ -232,7 +240,15 @@ def handle_workspace_choice(
if staging_path:
print(highlight(f" cd {staging_path}"))
else:
- print(highlight(f" cd {project_dir}/.worktrees/{spec_name}"))
+ worktree_path = get_existing_build_worktree(project_dir, spec_name)
+ if worktree_path:
+ print(highlight(f" cd {worktree_path}"))
+ else:
+ print(
+ highlight(
+ f" cd {project_dir}/.auto-claude/worktrees/tasks/{spec_name}"
+ )
+ )
print()
print("When you're ready to add it:")
print(highlight(f" python auto-claude/run.py --spec {spec_name} --merge"))
diff --git a/apps/backend/core/workspace/git_utils.py b/apps/backend/core/workspace/git_utils.py
index c027c4a426..5f6093b2e6 100644
--- a/apps/backend/core/workspace/git_utils.py
+++ b/apps/backend/core/workspace/git_utils.py
@@ -10,6 +10,45 @@
import subprocess
from pathlib import Path
+from core.git_executable import get_git_executable, run_git
+
+__all__ = [
+ # Exported helpers
+ "get_git_executable",
+ "run_git",
+ # Constants
+ "MAX_FILE_LINES_FOR_AI",
+ "MAX_PARALLEL_AI_MERGES",
+ "LOCK_FILES",
+ "BINARY_EXTENSIONS",
+ "MERGE_LOCK_TIMEOUT",
+ "MAX_SYNTAX_FIX_RETRIES",
+ # Functions
+ "detect_file_renames",
+ "apply_path_mapping",
+ "get_merge_base",
+ "has_uncommitted_changes",
+ "get_current_branch",
+ "get_existing_build_worktree",
+ "get_file_content_from_ref",
+ "get_binary_file_content_from_ref",
+ "get_changed_files_from_branch",
+ "is_process_running",
+ "is_binary_file",
+ "is_lock_file",
+ "validate_merged_syntax",
+ "create_conflict_file_with_git",
+ # Backward compat aliases
+ "_is_process_running",
+ "_is_binary_file",
+ "_is_lock_file",
+ "_validate_merged_syntax",
+ "_get_file_content_from_ref",
+ "_get_binary_file_content_from_ref",
+ "_get_changed_files_from_branch",
+ "_create_conflict_file_with_git",
+]
+
# Constants for merge limits
MAX_FILE_LINES_FOR_AI = 5000 # Skip AI for files larger than this
MAX_PARALLEL_AI_MERGES = 5 # Limit concurrent AI merge operations
@@ -33,6 +72,7 @@
}
BINARY_EXTENSIONS = {
+ # Images
".png",
".jpg",
".jpeg",
@@ -41,6 +81,11 @@
".webp",
".bmp",
".svg",
+ ".tiff",
+ ".tif",
+ ".heic",
+ ".heif",
+ # Documents
".pdf",
".doc",
".docx",
@@ -48,32 +93,63 @@
".xlsx",
".ppt",
".pptx",
+ # Archives
".zip",
".tar",
".gz",
".rar",
".7z",
+ ".bz2",
+ ".xz",
+ ".zst",
+ # Executables and libraries
".exe",
".dll",
".so",
".dylib",
".bin",
+ ".msi",
+ ".app",
+ # WebAssembly
+ ".wasm",
+ # Audio
".mp3",
- ".mp4",
".wav",
+ ".ogg",
+ ".flac",
+ ".aac",
+ ".m4a",
+ # Video
+ ".mp4",
".avi",
".mov",
".mkv",
+ ".webm",
+ ".wmv",
+ ".flv",
+ # Fonts
".woff",
".woff2",
".ttf",
".otf",
".eot",
+ # Compiled code
".pyc",
".pyo",
".class",
".o",
".obj",
+ # Data files
+ ".dat",
+ ".db",
+ ".sqlite",
+ ".sqlite3",
+ # Other binary formats
+ ".cur",
+ ".ani",
+ ".pbm",
+ ".pgm",
+ ".ppm",
}
# Merge lock timeout in seconds
@@ -113,9 +189,8 @@ def detect_file_renames(
# -M flag enables rename detection
# --diff-filter=R shows only renames
# --name-status shows status and file names
- result = subprocess.run(
+ result = run_git(
[
- "git",
"log",
"--name-status",
"-M",
@@ -124,8 +199,6 @@ def detect_file_renames(
f"{from_ref}..{to_ref}",
],
cwd=project_dir,
- capture_output=True,
- text=True,
)
if result.returncode == 0:
@@ -175,39 +248,21 @@ def get_merge_base(project_dir: Path, ref1: str, ref2: str) -> str | None:
Returns:
Merge-base commit hash, or None if not found
"""
- try:
- result = subprocess.run(
- ["git", "merge-base", ref1, ref2],
- cwd=project_dir,
- capture_output=True,
- text=True,
- )
- if result.returncode == 0:
- return result.stdout.strip()
- except Exception:
- pass
+ result = run_git(["merge-base", ref1, ref2], cwd=project_dir)
+ if result.returncode == 0:
+ return result.stdout.strip()
return None
def has_uncommitted_changes(project_dir: Path) -> bool:
"""Check if user has unsaved work."""
- result = subprocess.run(
- ["git", "status", "--porcelain"],
- cwd=project_dir,
- capture_output=True,
- text=True,
- )
+ result = run_git(["status", "--porcelain"], cwd=project_dir)
return bool(result.stdout.strip())
def get_current_branch(project_dir: Path) -> str:
"""Get the current branch name."""
- result = subprocess.run(
- ["git", "rev-parse", "--abbrev-ref", "HEAD"],
- cwd=project_dir,
- capture_output=True,
- text=True,
- )
+ result = run_git(["rev-parse", "--abbrev-ref", "HEAD"], cwd=project_dir)
return result.stdout.strip()
@@ -222,10 +277,16 @@ def get_existing_build_worktree(project_dir: Path, spec_name: str) -> Path | Non
Returns:
Path to the worktree if it exists for this spec, None otherwise
"""
- # Per-spec worktree path: .worktrees/{spec-name}/
- worktree_path = project_dir / ".worktrees" / spec_name
- if worktree_path.exists():
- return worktree_path
+ # New path first
+ new_path = project_dir / ".auto-claude" / "worktrees" / "tasks" / spec_name
+ if new_path.exists():
+ return new_path
+
+ # Legacy fallback
+ legacy_path = project_dir / ".worktrees" / spec_name
+ if legacy_path.exists():
+ return legacy_path
+
return None
@@ -233,11 +294,29 @@ def get_file_content_from_ref(
project_dir: Path, ref: str, file_path: str
) -> str | None:
"""Get file content from a git ref (branch, commit, etc.)."""
+ result = run_git(["show", f"{ref}:{file_path}"], cwd=project_dir)
+ if result.returncode == 0:
+ return result.stdout
+ return None
+
+
+def get_binary_file_content_from_ref(
+ project_dir: Path, ref: str, file_path: str
+) -> bytes | None:
+ """Get binary file content from a git ref (branch, commit, etc.).
+
+ Unlike get_file_content_from_ref, this returns raw bytes without
+ text decoding, suitable for binary files like images, audio, etc.
+
+ Note: Uses subprocess directly with get_git_executable() since
+ run_git() always returns text output.
+ """
+ git = get_git_executable()
result = subprocess.run(
- ["git", "show", f"{ref}:{file_path}"],
+ [git, "show", f"{ref}:{file_path}"],
cwd=project_dir,
capture_output=True,
- text=True,
+ text=False, # Return bytes, not text
)
if result.returncode == 0:
return result.stdout
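
A sketch of the intended call pattern (the ref and file path are made up; the helpers are the ones defined in this module): text files go through the decoding helper, binary files through the bytes helper.

    from pathlib import Path

    from core.workspace.git_utils import (
        get_binary_file_content_from_ref,
        get_file_content_from_ref,
        is_binary_file,
    )

    project_dir = Path("/tmp/example-repo")  # hypothetical
    ref, file_path = "auto-claude/my-spec", "assets/logo.png"

    if is_binary_file(file_path):
        data = get_binary_file_content_from_ref(project_dir, ref, file_path)
        if data is not None:
            (project_dir / file_path).write_bytes(data)
    else:
        text = get_file_content_from_ref(project_dir, ref, file_path)
        if text is not None:
            (project_dir / file_path).write_text(text, encoding="utf-8")
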
@@ -262,11 +341,9 @@ def get_changed_files_from_branch(
Returns:
List of (file_path, status) tuples
"""
- result = subprocess.run(
- ["git", "diff", "--name-status", f"{base_branch}...{spec_branch}"],
+ result = run_git(
+ ["diff", "--name-status", f"{base_branch}...{spec_branch}"],
cwd=project_dir,
- capture_output=True,
- text=True,
)
files = []
@@ -283,15 +360,23 @@ def get_changed_files_from_branch(
return files
+def _normalize_path(path: str) -> str:
+ """Normalize path separators to forward slashes for cross-platform comparison."""
+ return path.replace("\\", "/")
+
+
def _is_auto_claude_file(file_path: str) -> bool:
- """Check if a file is in the .auto-claude or auto-claude/specs directory."""
- # These patterns cover the internal spec/build files that shouldn't be merged
+ """Check if a file is in the .auto-claude or auto-claude/specs directory.
+
+ Handles both forward slashes (Unix/Git output) and backslashes (Windows).
+ """
+ normalized = _normalize_path(file_path)
excluded_patterns = [
".auto-claude/",
"auto-claude/specs/",
]
for pattern in excluded_patterns:
- if file_path.startswith(pattern):
+ if normalized.startswith(pattern):
return True
return False
@@ -485,11 +570,9 @@ def create_conflict_file_with_git(
try:
# git merge-file
# Exit codes: 0 = clean merge, 1 = conflicts, >1 = error
- result = subprocess.run(
- ["git", "merge-file", "-p", main_path, base_path, wt_path],
+ result = run_git(
+ ["merge-file", "-p", main_path, base_path, wt_path],
cwd=project_dir,
- capture_output=True,
- text=True,
)
# Read the merged content
@@ -516,5 +599,6 @@ def create_conflict_file_with_git(
_is_lock_file = is_lock_file
_validate_merged_syntax = validate_merged_syntax
_get_file_content_from_ref = get_file_content_from_ref
+_get_binary_file_content_from_ref = get_binary_file_content_from_ref
_get_changed_files_from_branch = get_changed_files_from_branch
_create_conflict_file_with_git = create_conflict_file_with_git
diff --git a/apps/backend/core/workspace/models.py b/apps/backend/core/workspace/models.py
index cc94413e54..92d2178c95 100644
--- a/apps/backend/core/workspace/models.py
+++ b/apps/backend/core/workspace/models.py
@@ -249,7 +249,7 @@ def get_next_spec_number(self) -> int:
max_number = max(max_number, self._scan_specs_dir(main_specs_dir))
# 2. Scan all worktree specs
- worktrees_dir = self.project_dir / ".worktrees"
+ worktrees_dir = self.project_dir / ".auto-claude" / "worktrees" / "tasks"
if worktrees_dir.exists():
for worktree in worktrees_dir.iterdir():
if worktree.is_dir():
diff --git a/apps/backend/core/workspace/setup.py b/apps/backend/core/workspace/setup.py
index b5b825722b..06269e7c1e 100644
--- a/apps/backend/core/workspace/setup.py
+++ b/apps/backend/core/workspace/setup.py
@@ -8,11 +8,12 @@
import json
import shutil
-import subprocess
import sys
from pathlib import Path
+from core.git_executable import run_git
from merge import FileTimelineTracker
+from security.constants import ALLOWLIST_FILENAME, PROFILE_FILENAME
from ui import (
Icons,
MenuOption,
@@ -267,6 +268,43 @@ def setup_workspace(
f"Environment files copied: {', '.join(copied_env_files)}", "success"
)
+ # Copy security configuration files if they exist
+ # Note: Unlike env files, security files always overwrite to ensure
+ # the worktree uses the same security rules as the main project.
+ # This prevents security bypasses through stale worktree configs.
+ security_files = [
+ ALLOWLIST_FILENAME,
+ PROFILE_FILENAME,
+ ]
+ security_files_copied = []
+
+ for filename in security_files:
+ source_file = project_dir / filename
+ if source_file.is_file():
+ target_file = worktree_info.path / filename
+ try:
+ shutil.copy2(source_file, target_file)
+ security_files_copied.append(filename)
+ except (OSError, PermissionError) as e:
+ debug_warning(MODULE, f"Failed to copy {filename}: {e}")
+ print_status(
+ f"Warning: Could not copy {filename} to worktree", "warning"
+ )
+
+ if security_files_copied:
+ print_status(
+ f"Security config copied: {', '.join(security_files_copied)}", "success"
+ )
+
+ # Ensure .auto-claude/ is in the worktree's .gitignore
+ # This is critical because the worktree inherits .gitignore from the base branch,
+ # which may not have .auto-claude/ if that change wasn't committed/pushed.
+ # Without this, spec files would be committed to the worktree's branch.
+ from init import ensure_gitignore_entry
+
+ if ensure_gitignore_entry(worktree_info.path, ".auto-claude/"):
+ debug(MODULE, "Added .auto-claude/ to worktree's .gitignore")
+
# Copy spec files to worktree if provided
localized_spec_dir = None
if source_spec_dir and source_spec_dir.exists():
@@ -368,11 +406,9 @@ def initialize_timeline_tracking(
files_to_modify.extend(subtask.get("files", []))
# Get the current branch point commit
- result = subprocess.run(
- ["git", "rev-parse", "HEAD"],
+ result = run_git(
+ ["rev-parse", "HEAD"],
cwd=project_dir,
- capture_output=True,
- text=True,
)
branch_point = result.stdout.strip() if result.returncode == 0 else None
diff --git a/apps/backend/core/worktree.py b/apps/backend/core/worktree.py
index ab3b89e3b3..eb4870dd7b 100644
--- a/apps/backend/core/worktree.py
+++ b/apps/backend/core/worktree.py
@@ -4,7 +4,7 @@
=============================================
Each spec gets its own worktree:
-- Worktree path: .worktrees/{spec-name}/
+- Worktree path: .auto-claude/worktrees/tasks/{spec-name}/
- Branch name: auto-claude/{spec-name}
This allows:
@@ -19,8 +19,126 @@
import re
import shutil
import subprocess
+import time
+from collections.abc import Callable
from dataclasses import dataclass
+from datetime import datetime
from pathlib import Path
+from typing import TypedDict, TypeVar
+
+from core.git_executable import get_git_executable, run_git
+from debug import debug_warning
+
+T = TypeVar("T")
+
+
+def _is_retryable_network_error(stderr: str) -> bool:
+ """Check if an error is a retryable network/connection issue."""
+ stderr_lower = stderr.lower()
+ return any(
+ term in stderr_lower
+ for term in ["connection", "network", "timeout", "reset", "refused"]
+ )
+
+
+def _is_retryable_http_error(stderr: str) -> bool:
+ """
+ Check if an HTTP error is retryable (5xx errors, timeouts).
+ Excludes auth errors (401, 403) and client errors (404, 422).
+ """
+ stderr_lower = stderr.lower()
+ # Check for HTTP 5xx errors (server errors are retryable)
+ if re.search(r"http[s]?\s*5\d{2}", stderr_lower):
+ return True
+ # Check for HTTP timeout patterns
+ if "http" in stderr_lower and "timeout" in stderr_lower:
+ return True
+ return False
+
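
For clarity, a few illustrative inputs against the two helpers above (the error strings are invented):

    # Server errors match the 5xx pattern and are retryable:
    assert _is_retryable_http_error("HTTP 502 Bad Gateway")
    # Auth/client errors are deliberately not retryable:
    assert not _is_retryable_http_error("HTTP 403 Forbidden")
    # Network-level failures are caught by the sibling helper:
    assert _is_retryable_network_error("fatal: Connection reset by peer")
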
+
+def _with_retry(
+ operation: Callable[[], tuple[bool, T | None, str]],
+ max_retries: int = 3,
+ is_retryable: Callable[[str], bool] | None = None,
+ on_retry: Callable[[int, str], None] | None = None,
+) -> tuple[T | None, str]:
+ """
+ Execute an operation with retry logic.
+
+ Args:
+ operation: Function that returns a tuple of (success: bool, result: T | None, error: str).
+ On success (success=True), result contains the value and error is empty.
+ On failure (success=False), result is None and error contains the message.
+ max_retries: Maximum number of retry attempts
+ is_retryable: Function to check if error is retryable based on error message
+ on_retry: Optional callback called before each retry with (attempt, error)
+
+ Returns:
+ Tuple of (result, last_error) where result is T on success, None on failure
+ """
+ last_error = ""
+
+ for attempt in range(1, max_retries + 1):
+ try:
+ success, result, error = operation()
+ if success:
+ return result, ""
+
+ last_error = error
+
+ # Check if error is retryable
+ if is_retryable and attempt < max_retries and is_retryable(error):
+ if on_retry:
+ on_retry(attempt, error)
+ backoff = 2 ** (attempt - 1)
+ time.sleep(backoff)
+ continue
+
+ break
+
+ except subprocess.TimeoutExpired:
+ last_error = "Operation timed out"
+ if attempt < max_retries:
+ if on_retry:
+ on_retry(attempt, last_error)
+ backoff = 2 ** (attempt - 1)
+ time.sleep(backoff)
+ continue
+ break
+
+ return None, last_error
+
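
A minimal sketch of wrapping an operation with _with_retry; the probe below is hypothetical but follows the (success, result, error) contract described above.

    def _probe_remote() -> tuple[bool, str | None, str]:
        # Hypothetical operation returning (success, result, error).
        result = run_git(["ls-remote", "--heads", "origin"], timeout=30)
        if result.returncode == 0:
            return True, result.stdout, ""
        return False, None, result.stderr

    heads, err = _with_retry(
        operation=_probe_remote,
        max_retries=3,
        is_retryable=_is_retryable_network_error,
        on_retry=lambda attempt, error: print(f"retry {attempt}: {error}"),
    )
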
+
+class PushBranchResult(TypedDict, total=False):
+ """Result of pushing a branch to remote."""
+
+ success: bool
+ branch: str
+ remote: str
+ error: str
+
+
+class PullRequestResult(TypedDict, total=False):
+ """Result of creating a pull request."""
+
+ success: bool
+ pr_url: str | None # None when PR was created but URL couldn't be extracted
+ already_exists: bool
+ error: str
+ message: str
+
+
+class PushAndCreatePRResult(TypedDict, total=False):
+ """Result of push_and_create_pr operation."""
+
+ success: bool
+ pushed: bool
+ remote: str
+ branch: str
+ pr_url: str | None # None when PR was created but URL couldn't be extracted
+ already_exists: bool
+ error: str
+ message: str
class WorktreeError(Exception):
@@ -42,20 +160,27 @@ class WorktreeInfo:
files_changed: int = 0
additions: int = 0
deletions: int = 0
+ last_commit_date: datetime | None = None
+ days_since_last_commit: int | None = None
class WorktreeManager:
"""
Manages per-spec Git worktrees.
- Each spec gets its own worktree in .worktrees/{spec-name}/ with
+ Each spec gets its own worktree in .auto-claude/worktrees/tasks/{spec-name}/ with
a corresponding branch auto-claude/{spec-name}.
"""
+ # Timeout constants for subprocess operations
+ GIT_PUSH_TIMEOUT = 120 # 2 minutes for git push (network operations)
+ GH_CLI_TIMEOUT = 60 # 1 minute for gh CLI commands
+ GH_QUERY_TIMEOUT = 30 # 30 seconds for gh CLI queries
+
def __init__(self, project_dir: Path, base_branch: str | None = None):
self.project_dir = project_dir
self.base_branch = base_branch or self._detect_base_branch()
- self.worktrees_dir = project_dir / ".worktrees"
+ self.worktrees_dir = project_dir / ".auto-claude" / "worktrees" / "tasks"
self._merge_lock = asyncio.Lock()
def _detect_base_branch(self) -> str:
@@ -74,13 +199,9 @@ def _detect_base_branch(self) -> str:
env_branch = os.getenv("DEFAULT_BRANCH")
if env_branch:
# Verify the branch exists
- result = subprocess.run(
- ["git", "rev-parse", "--verify", env_branch],
+ result = run_git(
+ ["rev-parse", "--verify", env_branch],
cwd=self.project_dir,
- capture_output=True,
- text=True,
- encoding="utf-8",
- errors="replace",
)
if result.returncode == 0:
return env_branch
@@ -91,13 +212,9 @@ def _detect_base_branch(self) -> str:
# 2. Auto-detect main/master
for branch in ["main", "master"]:
- result = subprocess.run(
- ["git", "rev-parse", "--verify", branch],
+ result = run_git(
+ ["rev-parse", "--verify", branch],
cwd=self.project_dir,
- capture_output=True,
- text=True,
- encoding="utf-8",
- errors="replace",
)
if result.returncode == 0:
return branch
@@ -111,30 +228,29 @@ def _detect_base_branch(self) -> str:
def _get_current_branch(self) -> str:
"""Get the current git branch."""
- result = subprocess.run(
- ["git", "rev-parse", "--abbrev-ref", "HEAD"],
+ result = run_git(
+ ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=self.project_dir,
- capture_output=True,
- text=True,
- encoding="utf-8",
- errors="replace",
)
if result.returncode != 0:
raise WorktreeError(f"Failed to get current branch: {result.stderr}")
return result.stdout.strip()
def _run_git(
- self, args: list[str], cwd: Path | None = None
+ self, args: list[str], cwd: Path | None = None, timeout: int = 60
) -> subprocess.CompletedProcess:
- """Run a git command and return the result."""
- return subprocess.run(
- ["git"] + args,
- cwd=cwd or self.project_dir,
- capture_output=True,
- text=True,
- encoding="utf-8",
- errors="replace",
- )
+ """Run a git command and return the result.
+
+ Args:
+ args: Git command arguments (without 'git' prefix)
+ cwd: Working directory for the command
+ timeout: Command timeout in seconds (default: 60)
+
+ Returns:
+ CompletedProcess with command results. On timeout, returns a
+ CompletedProcess with returncode=-1 and timeout error in stderr.
+ """
+ return run_git(args, cwd=cwd or self.project_dir, timeout=timeout)
def _unstage_gitignored_files(self) -> None:
"""
@@ -157,14 +273,10 @@ def _unstage_gitignored_files(self) -> None:
# 1. Check which staged files are gitignored
# git check-ignore returns the files that ARE ignored
- result = subprocess.run(
- ["git", "check-ignore", "--stdin"],
+ result = run_git(
+ ["check-ignore", "--stdin"],
cwd=self.project_dir,
- input="\n".join(staged_files),
- capture_output=True,
- text=True,
- encoding="utf-8",
- errors="replace",
+ input_data="\n".join(staged_files),
)
if result.stdout.strip():
@@ -179,8 +291,10 @@ def _unstage_gitignored_files(self) -> None:
file = file.strip()
if not file:
continue
+ # Normalize path separators for cross-platform (Windows backslash support)
+ normalized = file.replace("\\", "/")
for pattern in auto_claude_patterns:
- if file.startswith(pattern) or f"/{pattern}" in file:
+ if normalized.startswith(pattern) or f"/{pattern}" in normalized:
files_to_unstage.add(file)
break
@@ -194,13 +308,24 @@ def _unstage_gitignored_files(self) -> None:
def setup(self) -> None:
"""Create worktrees directory if needed."""
- self.worktrees_dir.mkdir(exist_ok=True)
+ self.worktrees_dir.mkdir(parents=True, exist_ok=True)
# ==================== Per-Spec Worktree Methods ====================
def get_worktree_path(self, spec_name: str) -> Path:
- """Get the worktree path for a spec."""
- return self.worktrees_dir / spec_name
+ """Get the worktree path for a spec (checks new and legacy locations)."""
+ # New path first (.auto-claude/worktrees/tasks/)
+ new_path = self.worktrees_dir / spec_name
+ if new_path.exists():
+ return new_path
+
+ # Legacy fallback (.worktrees/ instead of .auto-claude/worktrees/tasks/)
+ legacy_path = self.project_dir / ".worktrees" / spec_name
+ if legacy_path.exists():
+ return legacy_path
+
+ # Return new path as default for creation
+ return new_path
def get_branch_name(self, spec_name: str) -> str:
"""Get the branch name for a spec."""
@@ -261,6 +386,8 @@ def _get_worktree_stats(self, spec_name: str) -> dict:
"files_changed": 0,
"additions": 0,
"deletions": 0,
+ "last_commit_date": None,
+ "days_since_last_commit": None,
}
if not worktree_path.exists():
@@ -273,6 +400,52 @@ def _get_worktree_stats(self, spec_name: str) -> dict:
if result.returncode == 0:
stats["commit_count"] = int(result.stdout.strip() or "0")
+ # Last commit date (most recent commit in this worktree)
+ result = self._run_git(
+ ["log", "-1", "--format=%cd", "--date=iso"], cwd=worktree_path
+ )
+ if result.returncode == 0 and result.stdout.strip():
+ try:
+ # Parse ISO date format: "2026-01-04 00:25:25 +0100"
+ date_str = result.stdout.strip()
+ # Convert git format to ISO format for fromisoformat()
+ # "2026-01-04 00:25:25 +0100" -> "2026-01-04T00:25:25+01:00"
+ parts = date_str.rsplit(" ", 1)
+ if len(parts) == 2:
+ date_part, tz_part = parts
+ # Convert timezone format: "+0100" -> "+01:00"
+ if len(tz_part) == 5 and (
+ tz_part.startswith("+") or tz_part.startswith("-")
+ ):
+ tz_formatted = f"{tz_part[:3]}:{tz_part[3:]}"
+ iso_str = f"{date_part.replace(' ', 'T')}{tz_formatted}"
+ last_commit_date = datetime.fromisoformat(iso_str)
+ stats["last_commit_date"] = last_commit_date
+ # Use timezone-aware now() for accurate comparison
+ now_aware = datetime.now(last_commit_date.tzinfo)
+ stats["days_since_last_commit"] = (
+ now_aware - last_commit_date
+ ).days
+ else:
+ # Fallback for unexpected timezone format
+ last_commit_date = datetime.strptime(
+ parts[0], "%Y-%m-%d %H:%M:%S"
+ )
+ stats["last_commit_date"] = last_commit_date
+ stats["days_since_last_commit"] = (
+ datetime.now() - last_commit_date
+ ).days
+ else:
+ # No timezone in output
+ last_commit_date = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
+ stats["last_commit_date"] = last_commit_date
+ stats["days_since_last_commit"] = (
+ datetime.now() - last_commit_date
+ ).days
+            except (ValueError, TypeError):
+                # If parsing fails, silently continue without date info
+                pass
+
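
Worked example of the timezone normalization above, using standard-library behavior only (shown outside the patch):

    from datetime import datetime

    date_str = "2026-01-04 00:25:25 +0100"        # git --date=iso output
    date_part, tz_part = date_str.rsplit(" ", 1)  # tz_part == "+0100"
    iso_str = f"{date_part.replace(' ', 'T')}{tz_part[:3]}:{tz_part[3:]}"
    # iso_str == "2026-01-04T00:25:25+01:00", which datetime.fromisoformat()
    # accepts; the raw "+0100" offset would only parse on Python 3.11+.
    parsed = datetime.fromisoformat(iso_str)
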
# Diff stats
result = self._run_git(
["diff", "--shortstat", f"{self.base_branch}...HEAD"], cwd=worktree_path
@@ -327,9 +500,33 @@ def create_worktree(self, spec_name: str) -> WorktreeInfo:
# Delete branch if it exists (from previous attempt)
self._run_git(["branch", "-D", branch_name])
- # Create worktree with new branch from base
+ # Fetch latest from remote to ensure we have the most up-to-date code
+ # GitHub/remote is the source of truth, not the local branch
+ fetch_result = self._run_git(["fetch", "origin", self.base_branch])
+ if fetch_result.returncode != 0:
+ print(
+ f"Warning: Could not fetch {self.base_branch} from origin: {fetch_result.stderr}"
+ )
+ print("Falling back to local branch...")
+
+ # Determine the start point for the worktree
+ # Prefer origin/{base_branch} (remote) over local branch to ensure we have latest code
+ remote_ref = f"origin/{self.base_branch}"
+ start_point = self.base_branch # Default to local branch
+
+ # Check if remote ref exists and use it as the source of truth
+ check_remote = self._run_git(["rev-parse", "--verify", remote_ref])
+ if check_remote.returncode == 0:
+ start_point = remote_ref
+ print(f"Creating worktree from remote: {remote_ref}")
+ else:
+ print(
+ f"Remote ref {remote_ref} not found, using local branch: {self.base_branch}"
+ )
+
+ # Create worktree with new branch from the start point (remote preferred)
result = self._run_git(
- ["worktree", "add", "-b", branch_name, str(worktree_path), self.base_branch]
+ ["worktree", "add", "-b", branch_name, str(worktree_path), start_point]
)
if result.returncode != 0:
@@ -475,17 +672,27 @@ def commit_in_worktree(self, spec_name: str, message: str) -> bool:
# ==================== Listing & Discovery ====================
def list_all_worktrees(self) -> list[WorktreeInfo]:
- """List all spec worktrees."""
+ """List all spec worktrees (includes legacy .worktrees/ location)."""
worktrees = []
-
- if not self.worktrees_dir.exists():
- return worktrees
-
- for item in self.worktrees_dir.iterdir():
- if item.is_dir():
- info = self.get_worktree_info(item.name)
- if info:
- worktrees.append(info)
+ seen_specs = set()
+
+ # Check new location first
+ if self.worktrees_dir.exists():
+ for item in self.worktrees_dir.iterdir():
+ if item.is_dir():
+ info = self.get_worktree_info(item.name)
+ if info:
+ worktrees.append(info)
+ seen_specs.add(item.name)
+
+ # Check legacy location (.worktrees/)
+ legacy_dir = self.project_dir / ".worktrees"
+ if legacy_dir.exists():
+ for item in legacy_dir.iterdir():
+ if item.is_dir() and item.name not in seen_specs:
+ info = self.get_worktree_info(item.name)
+ if info:
+ worktrees.append(info)
return worktrees
@@ -587,81 +794,544 @@ def get_test_commands(self, spec_name: str) -> list[str]:
return commands
- # ==================== Backward Compatibility ====================
- # These methods provide backward compatibility with the old single-worktree API
+ def has_uncommitted_changes(self, spec_name: str | None = None) -> bool:
+ """Check if there are uncommitted changes."""
+ cwd = None
+ if spec_name:
+ worktree_path = self.get_worktree_path(spec_name)
+ if worktree_path.exists():
+ cwd = worktree_path
+ result = self._run_git(["status", "--porcelain"], cwd=cwd)
+ return bool(result.stdout.strip())
+
+ # ==================== PR Creation Methods ====================
- def get_staging_path(self) -> Path | None:
+ def push_branch(self, spec_name: str, force: bool = False) -> PushBranchResult:
"""
- Backward compatibility: Get path to any existing spec worktree.
- Prefer using get_worktree_path(spec_name) instead.
+ Push a spec's branch to the remote origin with retry logic.
+
+ Args:
+ spec_name: The spec folder name
+ force: Whether to force push (use with caution)
+
+ Returns:
+ PushBranchResult with keys:
+ - success: bool
+ - branch: str (branch name)
+ - remote: str (if successful)
+ - error: str (if failed)
"""
- worktrees = self.list_all_worktrees()
- if worktrees:
- return worktrees[0].path
- return None
+ info = self.get_worktree_info(spec_name)
+ if not info:
+ return PushBranchResult(
+ success=False,
+ error=f"No worktree found for spec: {spec_name}",
+ )
+
+ # Push the branch to origin
+ push_args = ["push", "-u", "origin", info.branch]
+ if force:
+ push_args.insert(1, "--force")
+
+ def do_push() -> tuple[bool, PushBranchResult | None, str]:
+ """Execute push operation for retry wrapper."""
+ try:
+ git_executable = get_git_executable()
+ result = subprocess.run(
+ [git_executable] + push_args,
+ cwd=info.path,
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ timeout=self.GIT_PUSH_TIMEOUT,
+ )
+
+ if result.returncode == 0:
+ return (
+ True,
+ PushBranchResult(
+ success=True,
+ branch=info.branch,
+ remote="origin",
+ ),
+ "",
+ )
+ return (False, None, result.stderr)
+ except FileNotFoundError:
+ return (False, None, "git executable not found")
+
+ max_retries = 3
+ result, last_error = _with_retry(
+ operation=do_push,
+ max_retries=max_retries,
+ is_retryable=_is_retryable_network_error,
+ )
- def get_staging_info(self) -> WorktreeInfo | None:
+ if result:
+ return result
+
+ # Handle timeout error message
+ if last_error == "Operation timed out":
+ return PushBranchResult(
+ success=False,
+ branch=info.branch,
+ error=f"Push timed out after {max_retries} attempts.",
+ )
+
+ return PushBranchResult(
+ success=False,
+ branch=info.branch,
+ error=f"Failed to push branch: {last_error}",
+ )
+
+ def create_pull_request(
+ self,
+ spec_name: str,
+ target_branch: str | None = None,
+ title: str | None = None,
+ draft: bool = False,
+ ) -> PullRequestResult:
"""
- Backward compatibility: Get info about any existing spec worktree.
- Prefer using get_worktree_info(spec_name) instead.
+ Create a GitHub pull request for a spec's branch using gh CLI with retry logic.
+
+ Args:
+ spec_name: The spec folder name
+ target_branch: Target branch for PR (defaults to base_branch)
+ title: PR title (defaults to spec name)
+ draft: Whether to create as draft PR
+
+ Returns:
+ PullRequestResult with keys:
+ - success: bool
+ - pr_url: str (if created)
+ - already_exists: bool (if PR already exists)
+ - error: str (if failed)
"""
- worktrees = self.list_all_worktrees()
- if worktrees:
- return worktrees[0]
+ info = self.get_worktree_info(spec_name)
+ if not info:
+ return PullRequestResult(
+ success=False,
+ error=f"No worktree found for spec: {spec_name}",
+ )
+
+ target = target_branch or self.base_branch
+ pr_title = title or f"auto-claude: {spec_name}"
+
+ # Get PR body from spec.md if available
+ pr_body = self._extract_spec_summary(spec_name)
+
+ # Build gh pr create command
+ gh_args = [
+ "gh",
+ "pr",
+ "create",
+ "--base",
+ target,
+ "--head",
+ info.branch,
+ "--title",
+ pr_title,
+ "--body",
+ pr_body,
+ ]
+ if draft:
+ gh_args.append("--draft")
+
+ def is_pr_retryable(stderr: str) -> bool:
+ """Check if PR creation error is retryable (network or HTTP 5xx)."""
+ return _is_retryable_network_error(stderr) or _is_retryable_http_error(
+ stderr
+ )
+
+ def do_create_pr() -> tuple[bool, PullRequestResult | None, str]:
+ """Execute PR creation for retry wrapper."""
+ try:
+ result = subprocess.run(
+ gh_args,
+ cwd=info.path,
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ timeout=self.GH_CLI_TIMEOUT,
+ )
+
+ # Check for "already exists" case (success, no retry needed)
+ if result.returncode != 0 and "already exists" in result.stderr.lower():
+ existing_url = self._get_existing_pr_url(spec_name, target)
+ result_dict = PullRequestResult(
+ success=True,
+ pr_url=existing_url,
+ already_exists=True,
+ )
+ if existing_url is None:
+ result_dict["message"] = (
+ "PR already exists but URL could not be retrieved"
+ )
+ return (True, result_dict, "")
+
+ if result.returncode == 0:
+ # Extract PR URL from output
+ pr_url: str | None = result.stdout.strip()
+ if not pr_url.startswith("http"):
+ # Try to find URL in output
+ # Use general pattern to support GitHub Enterprise instances
+ # Matches any HTTPS URL with /pull/ path
+ match = re.search(r"https://[^\s]+/pull/\d+", result.stdout)
+ if match:
+ pr_url = match.group(0)
+ else:
+ # Invalid output - no valid URL found
+ pr_url = None
+
+ return (
+ True,
+ PullRequestResult(
+ success=True,
+ pr_url=pr_url,
+ already_exists=False,
+ ),
+ "",
+ )
+
+ return (False, None, result.stderr)
+
+ except FileNotFoundError:
+ # gh CLI not installed - not retryable, raise to exit retry loop
+ raise
+
+ max_retries = 3
+ try:
+ result, last_error = _with_retry(
+ operation=do_create_pr,
+ max_retries=max_retries,
+ is_retryable=is_pr_retryable,
+ )
+
+ if result:
+ return result
+
+ # Handle timeout error message
+ if last_error == "Operation timed out":
+ return PullRequestResult(
+ success=False,
+ error=f"PR creation timed out after {max_retries} attempts.",
+ )
+
+ return PullRequestResult(
+ success=False,
+ error=f"Failed to create PR: {last_error}",
+ )
+
+ except FileNotFoundError:
+ # gh CLI not installed
+ return PullRequestResult(
+ success=False,
+ error="gh CLI not found. Install from https://cli.github.com/",
+ )
+
+ def _extract_spec_summary(self, spec_name: str) -> str:
+ """Extract a summary from spec.md for PR body."""
+ worktree_path = self.get_worktree_path(spec_name)
+ spec_path = worktree_path / ".auto-claude" / "specs" / spec_name / "spec.md"
+
+ if not spec_path.exists():
+ # Try project spec path
+ spec_path = (
+ self.project_dir / ".auto-claude" / "specs" / spec_name / "spec.md"
+ )
+
+ if not spec_path.exists():
+ return "Auto-generated PR from Auto-Claude build."
+
+ try:
+ content = spec_path.read_text(encoding="utf-8")
+ # Extract first few paragraphs (skip title, get overview)
+ lines = content.split("\n")
+ summary_lines = []
+ in_content = False
+
+ for line in lines:
+ # Skip title headers
+ if line.startswith("# "):
+ continue
+ # Start capturing after first content line
+ if line.strip() and not line.startswith("#"):
+ in_content = True
+ if in_content:
+ if line.startswith("## ") and summary_lines:
+ break # Stop at next section
+ summary_lines.append(line)
+ if len(summary_lines) >= 10: # Limit to ~10 lines
+ break
+
+ summary = "\n".join(summary_lines).strip()
+ if summary:
+ return summary
+ except (OSError, UnicodeDecodeError) as e:
+ # Silently fall back to default - file read errors shouldn't block PR creation
+ debug_warning(
+ "worktree", f"Could not extract spec summary for PR body: {e}"
+ )
+
+ return "Auto-generated PR from Auto-Claude build."
+
+ def _get_existing_pr_url(self, spec_name: str, target_branch: str) -> str | None:
+ """Get the URL of an existing PR for this branch."""
+ info = self.get_worktree_info(spec_name)
+ if not info:
+ return None
+
+ try:
+ result = subprocess.run(
+ ["gh", "pr", "view", info.branch, "--json", "url", "--jq", ".url"],
+ cwd=info.path,
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ timeout=self.GH_QUERY_TIMEOUT,
+ )
+ if result.returncode == 0:
+ return result.stdout.strip()
+ except (
+ subprocess.TimeoutExpired,
+ FileNotFoundError,
+ subprocess.SubprocessError,
+ ) as e:
+ # Silently ignore errors when fetching existing PR URL - this is a best-effort
+ # lookup that may fail due to network issues, missing gh CLI, or auth problems.
+ # Returning None allows the caller to handle missing URLs gracefully.
+ debug_warning("worktree", f"Could not get existing PR URL: {e}")
+
return None
- def merge_staging(self, delete_after: bool = True) -> bool:
- """
- Backward compatibility: Merge first found worktree.
- Prefer using merge_worktree(spec_name) instead.
+ def push_and_create_pr(
+ self,
+ spec_name: str,
+ target_branch: str | None = None,
+ title: str | None = None,
+ draft: bool = False,
+ force_push: bool = False,
+ ) -> PushAndCreatePRResult:
"""
- worktrees = self.list_all_worktrees()
- if worktrees:
- return self.merge_worktree(worktrees[0].spec_name, delete_after)
- return False
+ Push branch and create a pull request in one operation.
- def remove_staging(self, delete_branch: bool = True) -> None:
- """
- Backward compatibility: Remove first found worktree.
- Prefer using remove_worktree(spec_name) instead.
+ Args:
+ spec_name: The spec folder name
+ target_branch: Target branch for PR (defaults to base_branch)
+ title: PR title (defaults to spec name)
+ draft: Whether to create as draft PR
+ force_push: Whether to force push the branch
+
+ Returns:
+ PushAndCreatePRResult with keys:
+ - success: bool
+ - pr_url: str (if created)
+ - pushed: bool (if push succeeded)
+ - already_exists: bool (if PR already exists)
+ - error: str (if failed)
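+
+        Example (illustrative sketch; ``manager`` stands for an instance of this
+        class and "my-feature" is a hypothetical spec name):
+            result = manager.push_and_create_pr("my-feature", draft=True)
+            if result.get("success"):
+                print(result.get("pr_url"))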
"""
- worktrees = self.list_all_worktrees()
- if worktrees:
- self.remove_worktree(worktrees[0].spec_name, delete_branch)
+ # Step 1: Push the branch
+ push_result = self.push_branch(spec_name, force=force_push)
+ if not push_result.get("success"):
+ return PushAndCreatePRResult(
+ success=False,
+ pushed=False,
+ error=push_result.get("error", "Push failed"),
+ )
- def get_or_create_staging(self, spec_name: str) -> WorktreeInfo:
+ # Step 2: Create the PR
+ pr_result = self.create_pull_request(
+ spec_name=spec_name,
+ target_branch=target_branch,
+ title=title,
+ draft=draft,
+ )
+
+ # Combine results
+ return PushAndCreatePRResult(
+ success=pr_result.get("success", False),
+ pushed=True,
+ remote=push_result.get("remote"),
+ branch=push_result.get("branch"),
+ pr_url=pr_result.get("pr_url"),
+ already_exists=pr_result.get("already_exists", False),
+ error=pr_result.get("error"),
+ )
+
+ # ==================== Worktree Cleanup Methods ====================
+
+ def get_old_worktrees(
+ self, days_threshold: int = 30, include_stats: bool = False
+ ) -> list[WorktreeInfo] | list[str]:
"""
- Backward compatibility: Alias for get_or_create_worktree.
+ Find worktrees that haven't been modified in the specified number of days.
+
+ Args:
+ days_threshold: Number of days without activity to consider a worktree old (default: 30)
+ include_stats: If True, return full WorktreeInfo objects; if False, return just spec names
+
+ Returns:
+ List of old worktrees (either WorktreeInfo objects or spec names based on include_stats)
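+
+        Example (illustrative sketch; ``manager`` is an instance of this class):
+            stale_names = manager.get_old_worktrees(days_threshold=60)
+            stale_infos = manager.get_old_worktrees(days_threshold=60, include_stats=True)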
"""
- return self.get_or_create_worktree(spec_name)
+ old_worktrees = []
+
+ for worktree_info in self.list_all_worktrees():
+ # Skip if we can't determine age
+ if worktree_info.days_since_last_commit is None:
+ continue
+
+ if worktree_info.days_since_last_commit >= days_threshold:
+ if include_stats:
+ old_worktrees.append(worktree_info)
+ else:
+ old_worktrees.append(worktree_info.spec_name)
- def staging_exists(self) -> bool:
+ return old_worktrees
+
+ def cleanup_old_worktrees(
+ self, days_threshold: int = 30, dry_run: bool = False
+ ) -> tuple[list[str], list[str]]:
"""
- Backward compatibility: Check if any spec worktree exists.
- Prefer using worktree_exists(spec_name) instead.
+ Remove worktrees that haven't been modified in the specified number of days.
+
+ Args:
+ days_threshold: Number of days without activity to consider a worktree old (default: 30)
+ dry_run: If True, only report what would be removed without actually removing
+
+ Returns:
+ Tuple of (removed_specs, failed_specs) containing spec names
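+
+        Example (illustrative sketch; ``manager`` is an instance of this class):
+            manager.cleanup_old_worktrees(days_threshold=30, dry_run=True)  # preview only; returns ([], [])
+            removed, failed = manager.cleanup_old_worktrees(days_threshold=30)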
"""
- return len(self.list_all_worktrees()) > 0
+ old_worktrees = self.get_old_worktrees(
+ days_threshold=days_threshold, include_stats=True
+ )
+
+ if not old_worktrees:
+ print(f"No worktrees found older than {days_threshold} days.")
+ return ([], [])
+
+ removed = []
+ failed = []
+
+ if dry_run:
+ print(f"\n[DRY RUN] Would remove {len(old_worktrees)} old worktrees:")
+ for info in old_worktrees:
+ print(
+ f" - {info.spec_name} (last activity: {info.days_since_last_commit} days ago)"
+ )
+ return ([], [])
- def commit_in_staging(self, message: str) -> bool:
+ print(f"\nRemoving {len(old_worktrees)} old worktrees...")
+ for info in old_worktrees:
+ try:
+ self.remove_worktree(info.spec_name, delete_branch=True)
+ removed.append(info.spec_name)
+ print(
+ f" ✓ Removed {info.spec_name} (last activity: {info.days_since_last_commit} days ago)"
+ )
+ except Exception as e:
+ failed.append(info.spec_name)
+ print(f" ✗ Failed to remove {info.spec_name}: {e}")
+
+ if removed:
+ print(f"\nSuccessfully removed {len(removed)} worktree(s).")
+ if failed:
+ print(f"Failed to remove {len(failed)} worktree(s).")
+
+ return (removed, failed)
+
+ def get_worktree_count_warning(
+ self, warning_threshold: int = 10, critical_threshold: int = 20
+ ) -> str | None:
"""
- Backward compatibility: Commit in first found worktree.
- Prefer using commit_in_worktree(spec_name, message) instead.
+ Check worktree count and return a warning message if threshold is exceeded.
+
+ Args:
+ warning_threshold: Number of worktrees to trigger a warning (default: 10)
+ critical_threshold: Number of worktrees to trigger a critical warning (default: 20)
+
+ Returns:
+ Warning message string if threshold exceeded, None otherwise
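+
+        Example (illustrative sketch; ``manager`` is an instance of this class):
+            warning = manager.get_worktree_count_warning(warning_threshold=10)
+            if warning:
+                print(warning)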
"""
worktrees = self.list_all_worktrees()
- if worktrees:
- return self.commit_in_worktree(worktrees[0].spec_name, message)
- return False
+ count = len(worktrees)
+
+ if count >= critical_threshold:
+ old_worktrees = self.get_old_worktrees(days_threshold=30)
+ old_count = len(old_worktrees)
+ return (
+ f"CRITICAL: {count} worktrees detected! "
+ f"Consider cleaning up old worktrees ({old_count} are 30+ days old). "
+ f"Run cleanup to remove stale worktrees."
+ )
+ elif count >= warning_threshold:
+ old_worktrees = self.get_old_worktrees(days_threshold=30)
+ old_count = len(old_worktrees)
+ return (
+ f"WARNING: {count} worktrees detected. "
+ f"{old_count} are 30+ days old and may be safe to clean up."
+ )
- def has_uncommitted_changes(self, in_staging: bool = False) -> bool:
- """Check if there are uncommitted changes."""
+ return None
+
+ def print_worktree_summary(self) -> None:
+ """Print a summary of all worktrees with age information."""
worktrees = self.list_all_worktrees()
- if in_staging and worktrees:
- cwd = worktrees[0].path
- else:
- cwd = None
- result = self._run_git(["status", "--porcelain"], cwd=cwd)
- return bool(result.stdout.strip())
+ if not worktrees:
+ print("No worktrees found.")
+ return
-# Keep STAGING_WORKTREE_NAME for backward compatibility in imports
-STAGING_WORKTREE_NAME = "auto-claude"
+ print(f"\n{'=' * 80}")
+ print(f"Worktree Summary ({len(worktrees)} total)")
+ print(f"{'=' * 80}\n")
+
+ # Group by age
+ recent = [] # < 7 days
+ week_old = [] # 7-30 days
+ month_old = [] # 30-90 days
+ very_old = [] # > 90 days
+ unknown_age = []
+
+ for info in worktrees:
+ if info.days_since_last_commit is None:
+ unknown_age.append(info)
+ elif info.days_since_last_commit < 7:
+ recent.append(info)
+ elif info.days_since_last_commit < 30:
+ week_old.append(info)
+ elif info.days_since_last_commit < 90:
+ month_old.append(info)
+ else:
+ very_old.append(info)
+
+ def print_group(title: str, items: list[WorktreeInfo]):
+ if not items:
+ return
+ print(f"{title} ({len(items)}):")
+ for info in sorted(items, key=lambda x: x.spec_name):
+ age_str = (
+ f"{info.days_since_last_commit}d ago"
+ if info.days_since_last_commit is not None
+ else "unknown"
+ )
+ print(f" - {info.spec_name} (last activity: {age_str})")
+ print()
+
+ print_group("Recent (< 7 days)", recent)
+ print_group("Week Old (7-30 days)", week_old)
+ print_group("Month Old (30-90 days)", month_old)
+ print_group("Very Old (> 90 days)", very_old)
+ print_group("Unknown Age", unknown_age)
+
+ # Print cleanup suggestions
+ if month_old or very_old:
+ total_old = len(month_old) + len(very_old)
+ print(f"{'=' * 80}")
+ print(
+ f"💡 Suggestion: {total_old} worktree(s) are 30+ days old and may be safe to clean up."
+ )
+ print(" Review these worktrees and run cleanup if no longer needed.")
+ print(f"{'=' * 80}\n")
diff --git a/apps/backend/ideation/config.py b/apps/backend/ideation/config.py
index 9f650b78da..0f56a893d3 100644
--- a/apps/backend/ideation/config.py
+++ b/apps/backend/ideation/config.py
@@ -25,7 +25,7 @@ def __init__(
include_roadmap_context: bool = True,
include_kanban_context: bool = True,
max_ideas_per_type: int = 5,
- model: str = "claude-opus-4-5-20251101",
+ model: str = "sonnet", # Changed from "opus" (fix #433)
thinking_level: str = "medium",
refresh: bool = False,
append: bool = False,
diff --git a/apps/backend/ideation/generator.py b/apps/backend/ideation/generator.py
index 4e3005040e..dcd347041b 100644
--- a/apps/backend/ideation/generator.py
+++ b/apps/backend/ideation/generator.py
@@ -17,7 +17,7 @@
sys.path.insert(0, str(Path(__file__).parent.parent))
from client import create_client
-from phase_config import get_thinking_budget
+from phase_config import get_thinking_budget, resolve_model_id
from ui import print_status
# Ideation types
@@ -56,7 +56,7 @@ def __init__(
self,
project_dir: Path,
output_dir: Path,
- model: str = "claude-opus-4-5-20251101",
+ model: str = "sonnet", # Changed from "opus" (fix #433)
thinking_level: str = "medium",
max_ideas_per_type: int = 5,
):
@@ -94,7 +94,7 @@ async def run_agent(
client = create_client(
self.project_dir,
self.output_dir,
- self.model,
+ resolve_model_id(self.model),
max_thinking_tokens=self.thinking_budget,
)
@@ -187,7 +187,7 @@ async def run_recovery_agent(
client = create_client(
self.project_dir,
self.output_dir,
- self.model,
+ resolve_model_id(self.model),
max_thinking_tokens=self.thinking_budget,
)
diff --git a/apps/backend/ideation/runner.py b/apps/backend/ideation/runner.py
index 1e1537037a..c20d41f839 100644
--- a/apps/backend/ideation/runner.py
+++ b/apps/backend/ideation/runner.py
@@ -41,7 +41,7 @@ def __init__(
include_roadmap_context: bool = True,
include_kanban_context: bool = True,
max_ideas_per_type: int = 5,
- model: str = "claude-opus-4-5-20251101",
+ model: str = "sonnet", # Changed from "opus" (fix #433)
thinking_level: str = "medium",
refresh: bool = False,
append: bool = False,
diff --git a/apps/backend/ideation/types.py b/apps/backend/ideation/types.py
index 7180f1e0f0..c2c391d630 100644
--- a/apps/backend/ideation/types.py
+++ b/apps/backend/ideation/types.py
@@ -31,6 +31,6 @@ class IdeationConfig:
include_roadmap_context: bool = True
include_kanban_context: bool = True
max_ideas_per_type: int = 5
- model: str = "claude-opus-4-5-20251101"
+ model: str = "sonnet" # Changed from "opus" (fix #433)
refresh: bool = False
append: bool = False # If True, preserve existing ideas when merging
diff --git a/apps/backend/init.py b/apps/backend/init.py
index c6aee373d4..5f1962b44e 100644
--- a/apps/backend/init.py
+++ b/apps/backend/init.py
@@ -6,6 +6,32 @@
from pathlib import Path
+# All entries that should be added to .gitignore for auto-claude projects
+AUTO_CLAUDE_GITIGNORE_ENTRIES = [
+ ".auto-claude/",
+ ".auto-claude-security.json",
+ ".auto-claude-status",
+ ".claude_settings.json",
+ ".worktrees/",
+ ".security-key",
+ "logs/security/",
+]
+
+
+def _entry_exists_in_gitignore(lines: list[str], entry: str) -> bool:
+ """Check if an entry already exists in gitignore (handles trailing slash variations)."""
+ entry_normalized = entry.rstrip("/")
+ for line in lines:
+ line_stripped = line.strip()
+ # Match both "entry" and "entry/"
+ if (
+ line_stripped == entry
+ or line_stripped == entry_normalized
+ or line_stripped == entry_normalized + "/"
+ ):
+ return True
+ return False
+
def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> bool:
"""
@@ -27,17 +53,8 @@ def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> b
content = gitignore_path.read_text()
lines = content.splitlines()
- # Check if entry already exists (exact match or with trailing newline variations)
- entry_normalized = entry.rstrip("/")
- for line in lines:
- line_stripped = line.strip()
- # Match both ".auto-claude" and ".auto-claude/"
- if (
- line_stripped == entry
- or line_stripped == entry_normalized
- or line_stripped == entry_normalized + "/"
- ):
- return False # Already exists
+ if _entry_exists_in_gitignore(lines, entry):
+ return False # Already exists
# Entry doesn't exist, append it
# Ensure file ends with newline before adding our entry
@@ -59,11 +76,58 @@ def ensure_gitignore_entry(project_dir: Path, entry: str = ".auto-claude/") -> b
return True
+def ensure_all_gitignore_entries(project_dir: Path) -> list[str]:
+ """
+ Ensure all auto-claude related entries exist in the project's .gitignore file.
+
+ Creates .gitignore if it doesn't exist.
+
+ Args:
+ project_dir: The project root directory
+
+ Returns:
+ List of entries that were added (empty if all already existed)
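+
+    Example (illustrative sketch; the path is hypothetical):
+        added = ensure_all_gitignore_entries(Path("/path/to/project"))
+        if added:
+            print(f"Added {len(added)} .gitignore entries")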
+ """
+ gitignore_path = project_dir / ".gitignore"
+ added_entries: list[str] = []
+
+ # Read existing content or start fresh
+ if gitignore_path.exists():
+ content = gitignore_path.read_text()
+ lines = content.splitlines()
+ else:
+ content = ""
+ lines = []
+
+ # Find entries that need to be added
+ entries_to_add = [
+ entry
+ for entry in AUTO_CLAUDE_GITIGNORE_ENTRIES
+ if not _entry_exists_in_gitignore(lines, entry)
+ ]
+
+ if not entries_to_add:
+ return []
+
+ # Build the new content to append
+ # Ensure file ends with newline before adding our entries
+ if content and not content.endswith("\n"):
+ content += "\n"
+
+ content += "\n# Auto Claude generated files\n"
+ for entry in entries_to_add:
+ content += entry + "\n"
+ added_entries.append(entry)
+
+ gitignore_path.write_text(content)
+ return added_entries
+
+
def init_auto_claude_dir(project_dir: Path) -> tuple[Path, bool]:
"""
Initialize the .auto-claude directory for a project.
- Creates the directory if needed and ensures it's in .gitignore.
+ Creates the directory if needed and ensures all auto-claude files are in .gitignore.
Args:
project_dir: The project root directory
@@ -78,16 +142,18 @@ def init_auto_claude_dir(project_dir: Path) -> tuple[Path, bool]:
dir_created = not auto_claude_dir.exists()
auto_claude_dir.mkdir(parents=True, exist_ok=True)
- # Ensure .auto-claude is in .gitignore (only on first creation)
+ # Ensure all auto-claude entries are in .gitignore (only on first creation)
gitignore_updated = False
if dir_created:
- gitignore_updated = ensure_gitignore_entry(project_dir, ".auto-claude/")
+ added = ensure_all_gitignore_entries(project_dir)
+ gitignore_updated = len(added) > 0
else:
# Even if dir exists, check gitignore on first run
# Use a marker file to track if we've already checked
marker = auto_claude_dir / ".gitignore_checked"
if not marker.exists():
- gitignore_updated = ensure_gitignore_entry(project_dir, ".auto-claude/")
+ added = ensure_all_gitignore_entries(project_dir)
+ gitignore_updated = len(added) > 0
marker.touch()
return auto_claude_dir, gitignore_updated
@@ -109,3 +175,36 @@ def get_auto_claude_dir(project_dir: Path, ensure_exists: bool = True) -> Path:
return auto_claude_dir
return Path(project_dir) / ".auto-claude"
+
+
+def repair_gitignore(project_dir: Path) -> list[str]:
+ """
+ Repair an existing project's .gitignore to include all auto-claude entries.
+
+    This is useful for projects created before all of these entries were added,
+ or when gitignore entries were manually removed.
+
+ Also resets the .gitignore_checked marker to allow future updates.
+
+ Args:
+ project_dir: The project root directory
+
+ Returns:
+ List of entries that were added (empty if all already existed)
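+
+    Example (illustrative sketch):
+        added = repair_gitignore(Path("."))
+        print(added or "No missing entries")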
+ """
+ project_dir = Path(project_dir)
+ auto_claude_dir = project_dir / ".auto-claude"
+
+ # Remove the marker file so future checks will also run
+ marker = auto_claude_dir / ".gitignore_checked"
+ if marker.exists():
+ marker.unlink()
+
+ # Add all missing entries
+ added = ensure_all_gitignore_entries(project_dir)
+
+ # Re-create the marker
+ if auto_claude_dir.exists():
+ marker.touch()
+
+ return added
diff --git a/apps/backend/integrations/graphiti/config.py b/apps/backend/integrations/graphiti/config.py
index f2af6fd32f..4dbbc3e61e 100644
--- a/apps/backend/integrations/graphiti/config.py
+++ b/apps/backend/integrations/graphiti/config.py
@@ -622,10 +622,23 @@ def get_graphiti_status() -> dict:
status["errors"] = errors
# Errors are informational - embedder is optional (keyword search fallback)
- # Available if is_valid() returns True (just needs enabled flag)
- status["available"] = config.is_valid()
- if not status["available"]:
+ # CRITICAL FIX: Actually verify packages are importable before reporting available
+ # Don't just check config.is_valid() - actually try to import the module
+ if not config.is_valid():
status["reason"] = errors[0] if errors else "Configuration invalid"
+ return status
+
+ # Try importing the required Graphiti packages
+ try:
+        # Attempt to import the required graphiti_core packages
+ import graphiti_core # noqa: F401
+ from graphiti_core.driver.falkordb_driver import FalkorDriver # noqa: F401
+
+ # If we got here, packages are importable
+ status["available"] = True
+ except ImportError as e:
+ status["available"] = False
+ status["reason"] = f"Graphiti packages not installed: {e}"
return status
diff --git a/apps/backend/integrations/graphiti/queries_pkg/client.py b/apps/backend/integrations/graphiti/queries_pkg/client.py
index c1961484ac..3808d9d561 100644
--- a/apps/backend/integrations/graphiti/queries_pkg/client.py
+++ b/apps/backend/integrations/graphiti/queries_pkg/client.py
@@ -34,8 +34,25 @@ def _apply_ladybug_monkeypatch() -> bool:
sys.modules["kuzu"] = real_ladybug
logger.info("Applied LadybugDB monkeypatch (kuzu -> real_ladybug)")
return True
- except ImportError:
- pass
+ except ImportError as e:
+ logger.debug(f"LadybugDB import failed: {e}")
+ # On Windows with Python 3.12+, provide more specific error details
+ # (pywin32 is only required for Python 3.12+ per requirements.txt)
+ if sys.platform == "win32" and sys.version_info >= (3, 12):
+ # Check if it's the pywin32 error using both name attribute and string match
+ # for robustness across Python versions
+ is_pywin32_error = (
+ (hasattr(e, "name") and e.name in ("pywintypes", "pywin32", "win32api"))
+ or "pywintypes" in str(e)
+ or "pywin32" in str(e)
+ )
+ if is_pywin32_error:
+ logger.error(
+ "LadybugDB requires pywin32 on Windows. "
+                    'Install with: pip install "pywin32>=306"'
+ )
+ else:
+ logger.debug(f"Windows-specific import issue: {e}")
# Fall back to native kuzu
try:
diff --git a/apps/backend/integrations/linear/updater.py b/apps/backend/integrations/linear/updater.py
index d102642fab..02d3880cfc 100644
--- a/apps/backend/integrations/linear/updater.py
+++ b/apps/backend/integrations/linear/updater.py
@@ -118,6 +118,7 @@ def _create_linear_client() -> ClaudeSDKClient:
get_sdk_env_vars,
require_auth_token,
)
+ from phase_config import resolve_model_id
require_auth_token() # Raises ValueError if no token found
ensure_claude_code_oauth_token()
@@ -130,7 +131,7 @@ def _create_linear_client() -> ClaudeSDKClient:
return ClaudeSDKClient(
options=ClaudeAgentOptions(
- model="claude-haiku-4-5", # Fast & cheap model for simple API calls
+ model=resolve_model_id("haiku"), # Resolves via API Profile if configured
system_prompt="You are a Linear API assistant. Execute the requested Linear operation precisely.",
allowed_tools=LINEAR_TOOLS,
mcp_servers={
diff --git a/apps/backend/merge/__init__.py b/apps/backend/merge/__init__.py
index 99dc35d269..7ac715a964 100644
--- a/apps/backend/merge/__init__.py
+++ b/apps/backend/merge/__init__.py
@@ -9,7 +9,7 @@
traditional merge conflicts.
Components:
-- SemanticAnalyzer: Tree-sitter based semantic change extraction
+- SemanticAnalyzer: Regex-based semantic change extraction
- ConflictDetector: Rule-based conflict detection and compatibility analysis
- AutoMerger: Deterministic merge strategies (no AI needed)
- AIResolver: Minimal-context AI resolution for ambiguous conflicts
diff --git a/apps/backend/merge/ai_resolver/claude_client.py b/apps/backend/merge/ai_resolver/claude_client.py
index 77229043c5..40e118f923 100644
--- a/apps/backend/merge/ai_resolver/claude_client.py
+++ b/apps/backend/merge/ai_resolver/claude_client.py
@@ -82,7 +82,9 @@ async def _run_merge() -> str:
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
response_text += block.text
logger.info(f"AI merge response: {len(response_text)} chars")
diff --git a/apps/backend/merge/file_evolution/modification_tracker.py b/apps/backend/merge/file_evolution/modification_tracker.py
index b4cc281ae6..6d75237eb7 100644
--- a/apps/backend/merge/file_evolution/modification_tracker.py
+++ b/apps/backend/merge/file_evolution/modification_tracker.py
@@ -68,6 +68,7 @@ def record_modification(
new_content: str,
evolutions: dict[str, FileEvolution],
raw_diff: str | None = None,
+ skip_semantic_analysis: bool = False,
) -> TaskSnapshot | None:
"""
Record a file modification by a task.
@@ -79,6 +80,9 @@ def record_modification(
new_content: File content after modification
evolutions: Current evolution data (will be updated)
raw_diff: Optional unified diff for reference
+ skip_semantic_analysis: If True, skip expensive semantic analysis.
+ Use this for lightweight file tracking when only conflict
+ detection is needed (not conflict resolution).
Returns:
Updated TaskSnapshot, or None if file not being tracked
@@ -87,8 +91,8 @@ def record_modification(
# Get or create evolution
if rel_path not in evolutions:
- logger.warning(f"File {rel_path} not being tracked")
- # Note: We could auto-create here, but for now return None
+ # Debug level: this is expected for files not in baseline (e.g., from main's changes)
+ logger.debug(f"File {rel_path} not in evolution tracking - skipping")
return None
evolution = evolutions.get(rel_path)
@@ -105,9 +109,19 @@ def record_modification(
content_hash_before=compute_content_hash(old_content),
)
- # Analyze semantic changes
- analysis = self.analyzer.analyze_diff(rel_path, old_content, new_content)
- semantic_changes = analysis.changes
+ # Analyze semantic changes (or skip for lightweight tracking)
+ if skip_semantic_analysis:
+ # Fast path: just track the file change without analysis
+ # This is used for files that don't have conflicts
+ semantic_changes = []
+ debug(
+ MODULE,
+ f"Skipping semantic analysis for {rel_path} (lightweight tracking)",
+ )
+ else:
+ # Full analysis (only for conflict files)
+ analysis = self.analyzer.analyze_diff(rel_path, old_content, new_content)
+ semantic_changes = analysis.changes
# Update snapshot
snapshot.completed_at = datetime.now()
@@ -121,6 +135,7 @@ def record_modification(
logger.info(
f"Recorded modification to {rel_path} by {task_id}: "
f"{len(semantic_changes)} semantic changes"
+ + (" (lightweight)" if skip_semantic_analysis else "")
)
return snapshot
@@ -130,6 +145,7 @@ def refresh_from_git(
worktree_path: Path,
evolutions: dict[str, FileEvolution],
target_branch: str | None = None,
+ analyze_only_files: set[str] | None = None,
) -> None:
"""
Refresh task snapshots by analyzing git diff from worktree.
@@ -142,6 +158,10 @@ def refresh_from_git(
worktree_path: Path to the task's worktree
evolutions: Current evolution data (will be updated)
target_branch: Branch to compare against (default: detect from worktree)
+ analyze_only_files: If provided, only run full semantic analysis on
+ these files. Other files will be tracked with lightweight mode
+ (no semantic analysis). This optimizes performance by only
+ analyzing files that have actual conflicts.
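+
+        Example (illustrative sketch; ``tracker``, ``worktree``, ``evolutions``,
+        and the file path are hypothetical):
+            tracker.refresh_from_git(
+                task_id="task-123",
+                worktree_path=worktree,
+                evolutions=evolutions,
+                analyze_only_files={"src/app.py"},  # only this file gets full semantic analysis
+            )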
"""
# Determine the target branch to compare against
if not target_branch:
@@ -154,12 +174,27 @@ def refresh_from_git(
task_id=task_id,
worktree_path=str(worktree_path),
target_branch=target_branch,
+ analyze_only_files=list(analyze_only_files)[:10]
+ if analyze_only_files
+ else "all",
)
try:
- # Get list of files changed in the worktree vs target branch
+ # Get the merge-base to accurately identify task-only changes
+ # Using two-dot diff (merge-base..HEAD) returns only files changed by the task,
+ # not files changed on the target branch since divergence
+ merge_base_result = subprocess.run(
+ ["git", "merge-base", target_branch, "HEAD"],
+ cwd=worktree_path,
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ merge_base = merge_base_result.stdout.strip()
+
+ # Get list of files changed in the worktree since the merge-base
result = subprocess.run(
- ["git", "diff", "--name-only", f"{target_branch}...HEAD"],
+ ["git", "diff", "--name-only", f"{merge_base}..HEAD"],
cwd=worktree_path,
capture_output=True,
text=True,
@@ -175,55 +210,103 @@ def refresh_from_git(
else changed_files,
)
+ processed_count = 0
for file_path in changed_files:
- # Get the diff for this file
- diff_result = subprocess.run(
- ["git", "diff", f"{target_branch}...HEAD", "--", file_path],
- cwd=worktree_path,
- capture_output=True,
- text=True,
- check=True,
- )
-
- # Get content before (from target branch) and after (current)
try:
- show_result = subprocess.run(
- ["git", "show", f"{target_branch}:{file_path}"],
+ # Get the diff for this file (using merge-base for accurate task-only diff)
+ diff_result = subprocess.run(
+ ["git", "diff", f"{merge_base}..HEAD", "--", file_path],
cwd=worktree_path,
capture_output=True,
text=True,
check=True,
)
- old_content = show_result.stdout
- except subprocess.CalledProcessError:
- # File is new
- old_content = ""
- current_file = worktree_path / file_path
- if current_file.exists():
+ # Get content before (from merge-base - the point where task branched)
try:
- new_content = current_file.read_text(encoding="utf-8")
- except UnicodeDecodeError:
- new_content = current_file.read_text(
- encoding="utf-8", errors="replace"
+ show_result = subprocess.run(
+ ["git", "show", f"{merge_base}:{file_path}"],
+ cwd=worktree_path,
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ old_content = show_result.stdout
+ except subprocess.CalledProcessError:
+ # File is new
+ old_content = ""
+
+ current_file = worktree_path / file_path
+ if current_file.exists():
+ try:
+ new_content = current_file.read_text(encoding="utf-8")
+ except UnicodeDecodeError:
+ new_content = current_file.read_text(
+ encoding="utf-8", errors="replace"
+ )
+ else:
+ # File was deleted
+ new_content = ""
+
+ # Auto-create FileEvolution entry if not already tracked
+ # This handles retroactive tracking when capture_baselines wasn't called
+ rel_path = self.storage.get_relative_path(file_path)
+ if rel_path not in evolutions:
+ evolutions[rel_path] = FileEvolution(
+ file_path=rel_path,
+ baseline_commit=merge_base,
+ baseline_captured_at=datetime.now(),
+ baseline_content_hash=compute_content_hash(old_content),
+ baseline_snapshot_path="", # Not storing baseline file
+ task_snapshots=[],
+ )
+ debug(
+ MODULE,
+ f"Auto-created evolution entry for {rel_path}",
+ baseline_commit=merge_base[:8],
)
- else:
- # File was deleted
- new_content = ""
-
- # Record the modification
- self.record_modification(
- task_id=task_id,
- file_path=file_path,
- old_content=old_content,
- new_content=new_content,
- evolutions=evolutions,
- raw_diff=diff_result.stdout,
- )
- logger.info(
- f"Refreshed {len(changed_files)} files from worktree for task {task_id}"
- )
+ # Determine if this file needs full semantic analysis
+ # If analyze_only_files is provided, only analyze files in that set
+ # Otherwise, analyze all files (backward compatible)
+ skip_analysis = False
+ if analyze_only_files is not None:
+ skip_analysis = rel_path not in analyze_only_files
+
+ # Record the modification
+ self.record_modification(
+ task_id=task_id,
+ file_path=file_path,
+ old_content=old_content,
+ new_content=new_content,
+ evolutions=evolutions,
+ raw_diff=diff_result.stdout,
+ skip_semantic_analysis=skip_analysis,
+ )
+ processed_count += 1
+
+ except subprocess.CalledProcessError as e:
+ # Log error but continue with remaining files
+ logger.warning(
+ f"Failed to process {file_path} in refresh_from_git: {e}"
+ )
+ continue
+
+ # Calculate how many files were fully analyzed vs just tracked
+ if analyze_only_files is not None:
+ analyzed_count = len(
+ [f for f in changed_files if f in analyze_only_files]
+ )
+ tracked_only_count = processed_count - analyzed_count
+ logger.info(
+ f"Refreshed {processed_count}/{len(changed_files)} files from worktree for task {task_id} "
+ f"(analyzed: {analyzed_count}, tracked only: {tracked_only_count})"
+ )
+ else:
+ logger.info(
+ f"Refreshed {processed_count}/{len(changed_files)} files from worktree for task {task_id} "
+ "(full analysis on all files)"
+ )
except subprocess.CalledProcessError as e:
logger.error(f"Failed to refresh from git: {e}")
@@ -248,35 +331,23 @@ def mark_task_completed(
def _detect_target_branch(self, worktree_path: Path) -> str:
"""
- Detect the target branch to compare against for a worktree.
+ Detect the base branch to compare against for a worktree.
+
+ This finds the branch that the worktree was created FROM by looking
+ for common branch names (main, master, develop) that have a valid
+ merge-base with the worktree.
- This finds the branch that the worktree was created from by looking
- at the merge-base between the worktree and common branch names.
+ Note: We don't use upstream tracking because that returns the worktree's
+ own branch (e.g., origin/auto-claude/...) rather than the base branch.
Args:
worktree_path: Path to the worktree
Returns:
- The detected target branch name, defaults to 'main' if detection fails
+ The detected base branch name, defaults to 'main' if detection fails
"""
- # Try to get the upstream tracking branch
- try:
- result = subprocess.run(
- ["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"],
- cwd=worktree_path,
- capture_output=True,
- text=True,
- )
- if result.returncode == 0 and result.stdout.strip():
- upstream = result.stdout.strip()
- # Extract branch name from origin/branch format
- if "/" in upstream:
- return upstream.split("/", 1)[1]
- return upstream
- except subprocess.CalledProcessError:
- pass
-
# Try common branch names and find which one has a valid merge-base
+ # This is the reliable way to find what branch the worktree diverged from
for branch in ["main", "master", "develop"]:
try:
result = subprocess.run(
@@ -286,14 +357,39 @@ def _detect_target_branch(self, worktree_path: Path) -> str:
text=True,
)
if result.returncode == 0:
+ debug(
+ MODULE,
+ f"Detected base branch: {branch}",
+ worktree_path=str(worktree_path),
+ )
return branch
except subprocess.CalledProcessError:
continue
- # Default to main
+    # Before defaulting to 'main', verify that it actually exists
+    # If it doesn't (e.g., repos that use trunk or production), fall through to the HEAD~10 fallback below
+ try:
+ result = subprocess.run(
+ ["git", "rev-parse", "--verify", "main"],
+ cwd=worktree_path,
+ capture_output=True,
+ text=True,
+ )
+ if result.returncode == 0:
+ debug_warning(
+ MODULE,
+ "Could not find merge-base with standard branches, defaulting to 'main'",
+ worktree_path=str(worktree_path),
+ )
+ return "main"
+ except subprocess.CalledProcessError:
+ pass
+
+ # Last resort: use HEAD~10 as a fallback comparison point
+ # This allows modification tracking even on non-standard branch setups
debug_warning(
MODULE,
- "Could not detect target branch, defaulting to 'main'",
+ "No standard base branch found, modification tracking may be limited",
worktree_path=str(worktree_path),
)
- return "main"
+ return "HEAD~10"
diff --git a/apps/backend/merge/file_evolution/tracker.py b/apps/backend/merge/file_evolution/tracker.py
index c9df3b1a68..2a8d248eb4 100644
--- a/apps/backend/merge/file_evolution/tracker.py
+++ b/apps/backend/merge/file_evolution/tracker.py
@@ -327,6 +327,7 @@ def refresh_from_git(
task_id: str,
worktree_path: Path,
target_branch: str | None = None,
+ analyze_only_files: set[str] | None = None,
) -> None:
"""
Refresh task snapshots by analyzing git diff from worktree.
@@ -338,11 +339,16 @@ def refresh_from_git(
task_id: The task identifier
worktree_path: Path to the task's worktree
target_branch: Branch to compare against (default: auto-detect)
+ analyze_only_files: If provided, only run full semantic analysis on
+ these files. Other files will be tracked with lightweight mode
+ (no semantic analysis). This optimizes performance by only
+ analyzing files that have actual conflicts.
"""
self.modification_tracker.refresh_from_git(
task_id=task_id,
worktree_path=worktree_path,
evolutions=self._evolutions,
target_branch=target_branch,
+ analyze_only_files=analyze_only_files,
)
self._save_evolutions()
diff --git a/apps/backend/merge/file_merger.py b/apps/backend/merge/file_merger.py
index 1038055554..7fc3c35dc7 100644
--- a/apps/backend/merge/file_merger.py
+++ b/apps/backend/merge/file_merger.py
@@ -19,6 +19,35 @@
from .types import ChangeType, SemanticChange, TaskSnapshot
+def detect_line_ending(content: str) -> str:
+ """
+ Detect line ending style in content using priority-based detection.
+
+ Uses a priority order (CRLF > CR > LF) to detect the line ending style.
+ CRLF is checked first because it contains LF, so presence of any CRLF
+ indicates Windows-style endings. This approach is fast and works well
+ for files that consistently use one style.
+
+ Note: This returns the first detected style by priority, not the most
+ frequent style. For files with mixed line endings, consider normalizing
+ to a single style before processing.
+
+ Args:
+ content: File content to analyze
+
+ Returns:
+ The detected line ending string: "\\r\\n", "\\r", or "\\n"
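+
+    Example (illustrative):
+        detect_line_ending("line1\\r\\nline2")  # returns "\\r\\n"
+        detect_line_ending("line1\\nline2")     # returns "\\n"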
+ """
+ # Check for CRLF first (Windows) - must check before LF since CRLF contains LF
+ if "\r\n" in content:
+ return "\r\n"
+ # Check for CR (classic Mac, rare but possible)
+ if "\r" in content:
+ return "\r"
+ # Default to LF (Unix/modern Mac)
+ return "\n"
+
+
def apply_single_task_changes(
baseline: str,
snapshot: TaskSnapshot,
@@ -35,7 +64,16 @@ def apply_single_task_changes(
Returns:
Modified content with changes applied
"""
- content = baseline
+ # Detect line ending style before normalizing
+ original_line_ending = detect_line_ending(baseline)
+
+ # Normalize to LF for consistent matching with regex_analyzer output
+ # The regex_analyzer normalizes content to LF when extracting content_before/after,
+ # so we must also normalize baseline to ensure replace() matches correctly
+ content = baseline.replace("\r\n", "\n").replace("\r", "\n")
+
+ # Use LF for internal processing
+ line_ending = "\n"
for change in snapshot.semantic_changes:
if change.content_before and change.content_after:
@@ -45,13 +83,19 @@ def apply_single_task_changes(
# Addition - need to determine where to add
if change.change_type == ChangeType.ADD_IMPORT:
# Add import at top
- lines = content.split("\n")
+ lines = content.splitlines()
import_end = find_import_end(lines, file_path)
lines.insert(import_end, change.content_after)
- content = "\n".join(lines)
+ content = line_ending.join(lines)
elif change.change_type == ChangeType.ADD_FUNCTION:
# Add function at end (before exports)
- content += f"\n\n{change.content_after}"
+ content += f"{line_ending}{line_ending}{change.content_after}"
+
+ # Restore original line ending style if it was CRLF
+ if original_line_ending == "\r\n":
+ content = content.replace("\n", "\r\n")
+ elif original_line_ending == "\r":
+ content = content.replace("\n", "\r")
return content
@@ -72,7 +116,16 @@ def combine_non_conflicting_changes(
Returns:
Combined content with all changes applied
"""
- content = baseline
+ # Detect line ending style before normalizing
+ original_line_ending = detect_line_ending(baseline)
+
+ # Normalize to LF for consistent matching with regex_analyzer output
+ # The regex_analyzer normalizes content to LF when extracting content_before/after,
+ # so we must also normalize baseline to ensure replace() matches correctly
+ content = baseline.replace("\r\n", "\n").replace("\r", "\n")
+
+ # Use LF for internal processing
+ line_ending = "\n"
# Group changes by type for proper ordering
imports: list[SemanticChange] = []
@@ -96,13 +149,13 @@ def combine_non_conflicting_changes(
# Add imports
if imports:
- lines = content.split("\n")
+ lines = content.splitlines()
import_end = find_import_end(lines, file_path)
for imp in imports:
if imp.content_after and imp.content_after not in content:
lines.insert(import_end, imp.content_after)
import_end += 1
- content = "\n".join(lines)
+ content = line_ending.join(lines)
# Apply modifications
for mod in modifications:
@@ -112,15 +165,21 @@ def combine_non_conflicting_changes(
# Add functions
for func in functions:
if func.content_after:
- content += f"\n\n{func.content_after}"
+ content += f"{line_ending}{line_ending}{func.content_after}"
# Apply other changes
for change in other:
if change.content_after and not change.content_before:
- content += f"\n{change.content_after}"
+ content += f"{line_ending}{change.content_after}"
elif change.content_before and change.content_after:
content = content.replace(change.content_before, change.content_after)
+ # Restore original line ending style if it was CRLF
+ if original_line_ending == "\r\n":
+ content = content.replace("\n", "\r\n")
+ elif original_line_ending == "\r":
+ content = content.replace("\n", "\r")
+
return content
diff --git a/apps/backend/merge/git_utils.py b/apps/backend/merge/git_utils.py
index 92bfd40f7b..6868d0d015 100644
--- a/apps/backend/merge/git_utils.py
+++ b/apps/backend/merge/git_utils.py
@@ -27,28 +27,19 @@ def find_worktree(project_dir: Path, task_id: str) -> Path | None:
Returns:
Path to the worktree, or None if not found
"""
- # Check common locations
- worktrees_dir = project_dir / ".worktrees"
- if worktrees_dir.exists():
- # Look for worktree with task_id in name
- for entry in worktrees_dir.iterdir():
+ # Check new path first
+ new_worktrees_dir = project_dir / ".auto-claude" / "worktrees" / "tasks"
+ if new_worktrees_dir.exists():
+ for entry in new_worktrees_dir.iterdir():
if entry.is_dir() and task_id in entry.name:
return entry
- # Try git worktree list
- try:
- result = subprocess.run(
- ["git", "worktree", "list", "--porcelain"],
- cwd=project_dir,
- capture_output=True,
- text=True,
- check=True,
- )
- for line in result.stdout.split("\n"):
- if line.startswith("worktree ") and task_id in line:
- return Path(line.split(" ", 1)[1])
- except subprocess.CalledProcessError:
- pass
+ # Legacy fallback for backwards compatibility
+ legacy_worktrees_dir = project_dir / ".worktrees"
+ if legacy_worktrees_dir.exists():
+ for entry in legacy_worktrees_dir.iterdir():
+ if entry.is_dir() and task_id in entry.name:
+ return entry
return None
diff --git a/apps/backend/merge/semantic_analysis/__init__.py b/apps/backend/merge/semantic_analysis/__init__.py
index e06d039969..0f4cc099c4 100644
--- a/apps/backend/merge/semantic_analysis/__init__.py
+++ b/apps/backend/merge/semantic_analysis/__init__.py
@@ -1,12 +1,10 @@
"""
-Semantic analyzer package for AST-based code analysis.
+Semantic analyzer package for code analysis.
This package provides modular semantic analysis capabilities:
- models.py: Data structures for extracted elements
-- python_analyzer.py: Python-specific AST extraction
-- js_analyzer.py: JavaScript/TypeScript-specific AST extraction
- comparison.py: Element comparison and change classification
-- regex_analyzer.py: Fallback regex-based analysis
+- regex_analyzer.py: Regex-based analysis for code changes
"""
from .models import ExtractedElement
diff --git a/apps/backend/merge/semantic_analysis/js_analyzer.py b/apps/backend/merge/semantic_analysis/js_analyzer.py
deleted file mode 100644
index 048d03acba..0000000000
--- a/apps/backend/merge/semantic_analysis/js_analyzer.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-JavaScript/TypeScript-specific semantic analysis using tree-sitter.
-"""
-
-from __future__ import annotations
-
-from collections.abc import Callable
-
-from .models import ExtractedElement
-
-try:
- from tree_sitter import Node
-except ImportError:
- Node = None
-
-
-def extract_js_elements(
- node: Node,
- elements: dict[str, ExtractedElement],
- get_text: Callable[[Node], str],
- get_line: Callable[[int], int],
- ext: str,
- parent: str | None = None,
-) -> None:
- """
- Extract structural elements from JavaScript/TypeScript AST.
-
- Args:
- node: The tree-sitter node to extract from
- elements: Dictionary to populate with extracted elements
- get_text: Function to extract text from a node
- get_line: Function to convert byte position to line number
- ext: File extension (.js, .jsx, .ts, .tsx)
- parent: Parent element name for nested elements
- """
- for child in node.children:
- if child.type == "import_statement":
- text = get_text(child)
- # Try to extract the source module
- source_node = child.child_by_field_name("source")
- if source_node:
- source = get_text(source_node).strip("'\"")
- elements[f"import:{source}"] = ExtractedElement(
- element_type="import",
- name=source,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=text,
- )
-
- elif child.type in {"function_declaration", "function"}:
- name_node = child.child_by_field_name("name")
- if name_node:
- name = get_text(name_node)
- full_name = f"{parent}.{name}" if parent else name
- elements[f"function:{full_name}"] = ExtractedElement(
- element_type="function",
- name=full_name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=get_text(child),
- parent=parent,
- )
-
- elif child.type == "arrow_function":
- # Arrow functions are usually assigned to variables
- # We'll catch these via variable declarations
- pass
-
- elif child.type in {"lexical_declaration", "variable_declaration"}:
- # const/let/var declarations
- for declarator in child.children:
- if declarator.type == "variable_declarator":
- name_node = declarator.child_by_field_name("name")
- value_node = declarator.child_by_field_name("value")
- if name_node:
- name = get_text(name_node)
- content = get_text(child)
-
- # Check if it's a function (arrow function or function expression)
- is_function = False
- if value_node and value_node.type in {
- "arrow_function",
- "function",
- }:
- is_function = True
- elements[f"function:{name}"] = ExtractedElement(
- element_type="function",
- name=name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=content,
- parent=parent,
- )
- else:
- elements[f"variable:{name}"] = ExtractedElement(
- element_type="variable",
- name=name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=content,
- parent=parent,
- )
-
- elif child.type == "class_declaration":
- name_node = child.child_by_field_name("name")
- if name_node:
- name = get_text(name_node)
- elements[f"class:{name}"] = ExtractedElement(
- element_type="class",
- name=name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=get_text(child),
- )
- # Recurse into class body
- body = child.child_by_field_name("body")
- if body:
- extract_js_elements(
- body, elements, get_text, get_line, ext, parent=name
- )
-
- elif child.type == "method_definition":
- name_node = child.child_by_field_name("name")
- if name_node:
- name = get_text(name_node)
- full_name = f"{parent}.{name}" if parent else name
- elements[f"method:{full_name}"] = ExtractedElement(
- element_type="method",
- name=full_name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=get_text(child),
- parent=parent,
- )
-
- elif child.type == "export_statement":
- # Recurse into exports to find the actual declaration
- extract_js_elements(child, elements, get_text, get_line, ext, parent)
-
- # TypeScript specific
- elif child.type in {"interface_declaration", "type_alias_declaration"}:
- name_node = child.child_by_field_name("name")
- if name_node:
- name = get_text(name_node)
- elem_type = "interface" if "interface" in child.type else "type"
- elements[f"{elem_type}:{name}"] = ExtractedElement(
- element_type=elem_type,
- name=name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=get_text(child),
- )
-
- # Recurse into statement blocks
- elif child.type in {"program", "statement_block", "class_body"}:
- extract_js_elements(child, elements, get_text, get_line, ext, parent)
diff --git a/apps/backend/merge/semantic_analysis/python_analyzer.py b/apps/backend/merge/semantic_analysis/python_analyzer.py
deleted file mode 100644
index def71a943b..0000000000
--- a/apps/backend/merge/semantic_analysis/python_analyzer.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Python-specific semantic analysis using tree-sitter.
-"""
-
-from __future__ import annotations
-
-from collections.abc import Callable
-
-from .models import ExtractedElement
-
-try:
- from tree_sitter import Node
-except ImportError:
- Node = None
-
-
-def extract_python_elements(
- node: Node,
- elements: dict[str, ExtractedElement],
- get_text: Callable[[Node], str],
- get_line: Callable[[int], int],
- parent: str | None = None,
-) -> None:
- """
- Extract structural elements from Python AST.
-
- Args:
- node: The tree-sitter node to extract from
- elements: Dictionary to populate with extracted elements
- get_text: Function to extract text from a node
- get_line: Function to convert byte position to line number
- parent: Parent element name for nested elements
- """
- for child in node.children:
- if child.type == "import_statement":
- # import x, y
- text = get_text(child)
- # Extract module names
- for name_node in child.children:
- if name_node.type == "dotted_name":
- name = get_text(name_node)
- elements[f"import:{name}"] = ExtractedElement(
- element_type="import",
- name=name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=text,
- )
-
- elif child.type == "import_from_statement":
- # from x import y, z
- text = get_text(child)
- module = None
- for sub in child.children:
- if sub.type == "dotted_name":
- module = get_text(sub)
- break
- if module:
- elements[f"import_from:{module}"] = ExtractedElement(
- element_type="import_from",
- name=module,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=text,
- )
-
- elif child.type == "function_definition":
- name_node = child.child_by_field_name("name")
- if name_node:
- name = get_text(name_node)
- full_name = f"{parent}.{name}" if parent else name
- elements[f"function:{full_name}"] = ExtractedElement(
- element_type="function",
- name=full_name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=get_text(child),
- parent=parent,
- )
-
- elif child.type == "class_definition":
- name_node = child.child_by_field_name("name")
- if name_node:
- name = get_text(name_node)
- elements[f"class:{name}"] = ExtractedElement(
- element_type="class",
- name=name,
- start_line=get_line(child.start_byte),
- end_line=get_line(child.end_byte),
- content=get_text(child),
- )
- # Recurse into class body for methods
- body = child.child_by_field_name("body")
- if body:
- extract_python_elements(
- body, elements, get_text, get_line, parent=name
- )
-
- elif child.type == "decorated_definition":
- # Handle decorated functions/classes
- for sub in child.children:
- if sub.type in {"function_definition", "class_definition"}:
- extract_python_elements(child, elements, get_text, get_line, parent)
- break
-
- # Recurse for other compound statements
- elif child.type in {
- "if_statement",
- "while_statement",
- "for_statement",
- "try_statement",
- "with_statement",
- }:
- extract_python_elements(child, elements, get_text, get_line, parent)
diff --git a/apps/backend/merge/semantic_analysis/regex_analyzer.py b/apps/backend/merge/semantic_analysis/regex_analyzer.py
index 40556f765c..9ceff32bee 100644
--- a/apps/backend/merge/semantic_analysis/regex_analyzer.py
+++ b/apps/backend/merge/semantic_analysis/regex_analyzer.py
@@ -1,5 +1,5 @@
"""
-Regex-based fallback analysis when tree-sitter is not available.
+Regex-based semantic analysis for code changes.
"""
from __future__ import annotations
@@ -17,7 +17,7 @@ def analyze_with_regex(
ext: str,
) -> FileAnalysis:
"""
- Fallback analysis using regex when tree-sitter isn't available.
+ Analyze code changes using regex patterns.
Args:
file_path: Path to the file being analyzed
@@ -30,11 +30,16 @@ def analyze_with_regex(
"""
changes: list[SemanticChange] = []
+ # Normalize line endings to LF for consistent cross-platform behavior
+ # This handles Windows CRLF, old Mac CR, and Unix LF
+ before_normalized = before.replace("\r\n", "\n").replace("\r", "\n")
+ after_normalized = after.replace("\r\n", "\n").replace("\r", "\n")
+
# Get a unified diff
diff = list(
difflib.unified_diff(
- before.splitlines(keepends=True),
- after.splitlines(keepends=True),
+ before_normalized.splitlines(keepends=True),
+ after_normalized.splitlines(keepends=True),
lineterm="",
)
)
@@ -89,8 +94,22 @@ def analyze_with_regex(
# Detect function changes (simplified)
func_pattern = get_function_pattern(ext)
if func_pattern:
- funcs_before = set(func_pattern.findall(before))
- funcs_after = set(func_pattern.findall(after))
+ # For JS/TS patterns with alternation, findall() returns tuples
+ # Extract the non-empty match from each tuple
+ def extract_func_names(matches):
+ names = set()
+ for match in matches:
+ if isinstance(match, tuple):
+ # Get the first non-empty group from the tuple
+ name = next((m for m in match if m), None)
+ if name:
+ names.add(name)
+ elif match:
+ names.add(match)
+ return names
+
+ funcs_before = extract_func_names(func_pattern.findall(before_normalized))
+ funcs_after = extract_func_names(func_pattern.findall(after_normalized))
for func in funcs_after - funcs_before:
changes.append(
diff --git a/apps/backend/merge/semantic_analyzer.py b/apps/backend/merge/semantic_analyzer.py
index 07aea59056..30697c1a94 100644
--- a/apps/backend/merge/semantic_analyzer.py
+++ b/apps/backend/merge/semantic_analyzer.py
@@ -2,32 +2,27 @@
Semantic Analyzer
=================
-Analyzes code changes at a semantic level using tree-sitter.
+Analyzes code changes at a semantic level using regex-based heuristics.
-This module provides AST-based analysis of code changes, extracting
-meaningful semantic changes like "added import", "modified function",
-"wrapped JSX element" rather than line-level diffs.
-
-When tree-sitter is not available, falls back to regex-based heuristics.
+This module provides analysis of code changes, extracting meaningful
+semantic changes like "added import", "modified function", "wrapped JSX element"
+rather than line-level diffs.
"""
from __future__ import annotations
import logging
from pathlib import Path
-from typing import Any
-from .types import ChangeType, FileAnalysis
+from .types import FileAnalysis
# Import debug utilities
try:
from debug import (
debug,
debug_detailed,
- debug_error,
debug_success,
debug_verbose,
- is_debug_enabled,
)
except ImportError:
# Fallback if debug module not available
@@ -43,71 +38,18 @@ def debug_verbose(*args, **kwargs):
def debug_success(*args, **kwargs):
pass
- def debug_error(*args, **kwargs):
- pass
-
- def is_debug_enabled():
- return False
-
logger = logging.getLogger(__name__)
MODULE = "merge.semantic_analyzer"
-# Try to import tree-sitter - it's optional but recommended
-TREE_SITTER_AVAILABLE = False
-try:
- import tree_sitter # noqa: F401
- from tree_sitter import Language, Node, Parser, Tree
-
- TREE_SITTER_AVAILABLE = True
- logger.info("tree-sitter available, using AST-based analysis")
-except ImportError:
- logger.warning("tree-sitter not available, using regex-based fallback")
- Tree = None
- Node = None
-
-# Try to import language bindings
-LANGUAGES_AVAILABLE: dict[str, Any] = {}
-if TREE_SITTER_AVAILABLE:
- try:
- import tree_sitter_python as tspython
-
- LANGUAGES_AVAILABLE[".py"] = tspython.language()
- except ImportError:
- pass
-
- try:
- import tree_sitter_javascript as tsjs
-
- LANGUAGES_AVAILABLE[".js"] = tsjs.language()
- LANGUAGES_AVAILABLE[".jsx"] = tsjs.language()
- except ImportError:
- pass
-
- try:
- import tree_sitter_typescript as tsts
-
- LANGUAGES_AVAILABLE[".ts"] = tsts.language_typescript()
- LANGUAGES_AVAILABLE[".tsx"] = tsts.language_tsx()
- except ImportError:
- pass
-
-# Import our modular components
-from .semantic_analysis.comparison import compare_elements
+# Import regex-based analyzer
from .semantic_analysis.models import ExtractedElement
from .semantic_analysis.regex_analyzer import analyze_with_regex
-if TREE_SITTER_AVAILABLE:
- from .semantic_analysis.js_analyzer import extract_js_elements
- from .semantic_analysis.python_analyzer import extract_python_elements
-
class SemanticAnalyzer:
"""
- Analyzes code changes at a semantic level.
-
- Uses tree-sitter for AST-based analysis when available,
- falling back to regex-based heuristics when not.
+ Analyzes code changes at a semantic level using regex-based heuristics.
Example:
analyzer = SemanticAnalyzer()
@@ -117,28 +59,8 @@ class SemanticAnalyzer:
"""
def __init__(self):
- """Initialize the analyzer with available parsers."""
- self._parsers: dict[str, Parser] = {}
-
- debug(
- MODULE,
- "Initializing SemanticAnalyzer",
- tree_sitter_available=TREE_SITTER_AVAILABLE,
- )
-
- if TREE_SITTER_AVAILABLE:
- for ext, lang in LANGUAGES_AVAILABLE.items():
- parser = Parser()
- parser.language = Language(lang)
- self._parsers[ext] = parser
- debug_detailed(MODULE, f"Initialized parser for {ext}")
- debug_success(
- MODULE,
- "SemanticAnalyzer initialized",
- parsers=list(self._parsers.keys()),
- )
- else:
- debug(MODULE, "Using regex-based fallback (tree-sitter not available)")
+ """Initialize the analyzer."""
+ debug(MODULE, "Initializing SemanticAnalyzer (regex-based)")
def analyze_diff(
self,
@@ -171,13 +93,8 @@ def analyze_diff(
task_id=task_id,
)
- # Use tree-sitter if available for this language
- if ext in self._parsers:
- debug_detailed(MODULE, f"Using tree-sitter parser for {ext}")
- analysis = self._analyze_with_tree_sitter(file_path, before, after, ext)
- else:
- debug_detailed(MODULE, f"Using regex fallback for {ext}")
- analysis = analyze_with_regex(file_path, before, after, ext)
+ # Use regex-based analysis
+ analysis = analyze_with_regex(file_path, before, after, ext)
debug_success(
MODULE,
@@ -201,77 +118,6 @@ def analyze_diff(
return analysis
- def _analyze_with_tree_sitter(
- self,
- file_path: str,
- before: str,
- after: str,
- ext: str,
- ) -> FileAnalysis:
- """Analyze using tree-sitter AST parsing."""
- parser = self._parsers[ext]
-
- tree_before = parser.parse(bytes(before, "utf-8"))
- tree_after = parser.parse(bytes(after, "utf-8"))
-
- # Extract structural elements from both versions
- elements_before = self._extract_elements(tree_before, before, ext)
- elements_after = self._extract_elements(tree_after, after, ext)
-
- # Compare and generate semantic changes
- changes = compare_elements(elements_before, elements_after, ext)
-
- # Build the analysis
- analysis = FileAnalysis(file_path=file_path, changes=changes)
-
- # Populate summary fields
- for change in changes:
- if change.change_type in {
- ChangeType.MODIFY_FUNCTION,
- ChangeType.ADD_HOOK_CALL,
- }:
- analysis.functions_modified.add(change.target)
- elif change.change_type == ChangeType.ADD_FUNCTION:
- analysis.functions_added.add(change.target)
- elif change.change_type == ChangeType.ADD_IMPORT:
- analysis.imports_added.add(change.target)
- elif change.change_type == ChangeType.REMOVE_IMPORT:
- analysis.imports_removed.add(change.target)
- elif change.change_type in {
- ChangeType.MODIFY_CLASS,
- ChangeType.ADD_METHOD,
- }:
- analysis.classes_modified.add(change.target.split(".")[0])
-
- analysis.total_lines_changed += change.line_end - change.line_start + 1
-
- return analysis
-
- def _extract_elements(
- self,
- tree: Tree,
- source: str,
- ext: str,
- ) -> dict[str, ExtractedElement]:
- """Extract structural elements from a syntax tree."""
- elements: dict[str, ExtractedElement] = {}
- source_bytes = bytes(source, "utf-8")
-
- def get_text(node: Node) -> str:
- return source_bytes[node.start_byte : node.end_byte].decode("utf-8")
-
- def get_line(byte_pos: int) -> int:
- # Convert byte position to line number (1-indexed)
- return source[:byte_pos].count("\n") + 1
-
- # Language-specific extraction
- if ext == ".py":
- extract_python_elements(tree.root_node, elements, get_text, get_line)
- elif ext in {".js", ".jsx", ".ts", ".tsx"}:
- extract_js_elements(tree.root_node, elements, get_text, get_line, ext)
-
- return elements
-
def analyze_file(self, file_path: str, content: str) -> FileAnalysis:
"""
Analyze a single file's structure (not a diff).
@@ -291,12 +137,7 @@ def analyze_file(self, file_path: str, content: str) -> FileAnalysis:
@property
def supported_extensions(self) -> set[str]:
"""Get the set of supported file extensions."""
- if TREE_SITTER_AVAILABLE:
- # Tree-sitter extensions plus regex fallbacks
- return set(self._parsers.keys()) | {".py", ".js", ".jsx", ".ts", ".tsx"}
- else:
- # Only regex-supported extensions
- return {".py", ".js", ".jsx", ".ts", ".tsx"}
+ return {".py", ".js", ".jsx", ".ts", ".tsx"}
def is_supported(self, file_path: str) -> bool:
"""Check if a file type is supported for semantic analysis."""
diff --git a/apps/backend/merge/timeline_git.py b/apps/backend/merge/timeline_git.py
index ebf0952a22..cc9e6ca6cd 100644
--- a/apps/backend/merge/timeline_git.py
+++ b/apps/backend/merge/timeline_git.py
@@ -189,7 +189,14 @@ def get_worktree_file_content(self, task_id: str, file_path: str) -> str:
task_id.replace("task-", "") if task_id.startswith("task-") else task_id
)
- worktree_path = self.project_path / ".worktrees" / spec_name / file_path
+ worktree_path = (
+ self.project_path
+ / ".auto-claude"
+ / "worktrees"
+ / "tasks"
+ / spec_name
+ / file_path
+ )
if worktree_path.exists():
try:
return worktree_path.read_text(encoding="utf-8")
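For illustration, the relocated worktree layout resolves like this for a hypothetical project root and task id (the values below are made up):

```python
from pathlib import Path

project_path = Path("/repo")          # illustrative project root
task_id = "task-042"                  # illustrative task id
file_path = "apps/backend/main.py"

spec_name = task_id.replace("task-", "") if task_id.startswith("task-") else task_id

# New layout: <root>/.auto-claude/worktrees/tasks/<spec>/<file>
# (previously <root>/.worktrees/<spec>/<file>)
worktree_path = (
    project_path / ".auto-claude" / "worktrees" / "tasks" / spec_name / file_path
)
print(worktree_path)  # /repo/.auto-claude/worktrees/tasks/042/apps/backend/main.py
```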
diff --git a/apps/backend/ollama_model_detector.py b/apps/backend/ollama_model_detector.py
index 40819e029c..aaa43883a5 100644
--- a/apps/backend/ollama_model_detector.py
+++ b/apps/backend/ollama_model_detector.py
@@ -16,6 +16,7 @@
import argparse
import json
+import re
import sys
import urllib.error
import urllib.request
@@ -23,6 +24,10 @@
DEFAULT_OLLAMA_URL = "http://localhost:11434"
+# Minimum Ollama version required for newer embedding models (qwen3-embedding, etc.)
+# These models were added in Ollama 0.10.0
+MIN_OLLAMA_VERSION_FOR_NEW_MODELS = "0.10.0"
+
# Known embedding models and their dimensions
# This list helps identify embedding models from the model name
KNOWN_EMBEDDING_MODELS = {
@@ -31,10 +36,26 @@
"dim": 768,
"description": "Google EmbeddingGemma (lightweight)",
},
- "qwen3-embedding": {"dim": 1024, "description": "Qwen3 Embedding (0.6B)"},
- "qwen3-embedding:0.6b": {"dim": 1024, "description": "Qwen3 Embedding 0.6B"},
- "qwen3-embedding:4b": {"dim": 2560, "description": "Qwen3 Embedding 4B"},
- "qwen3-embedding:8b": {"dim": 4096, "description": "Qwen3 Embedding 8B"},
+ "qwen3-embedding": {
+ "dim": 1024,
+ "description": "Qwen3 Embedding (0.6B)",
+ "min_version": "0.10.0",
+ },
+ "qwen3-embedding:0.6b": {
+ "dim": 1024,
+ "description": "Qwen3 Embedding 0.6B",
+ "min_version": "0.10.0",
+ },
+ "qwen3-embedding:4b": {
+ "dim": 2560,
+ "description": "Qwen3 Embedding 4B",
+ "min_version": "0.10.0",
+ },
+ "qwen3-embedding:8b": {
+ "dim": 4096,
+ "description": "Qwen3 Embedding 8B",
+ "min_version": "0.10.0",
+ },
"bge-base-en": {"dim": 768, "description": "BAAI General Embedding - Base"},
"bge-large-en": {"dim": 1024, "description": "BAAI General Embedding - Large"},
"bge-small-en": {"dim": 384, "description": "BAAI General Embedding - Small"},
@@ -63,6 +84,7 @@
"size_estimate": "3.1 GB",
"dim": 2560,
"badge": "recommended",
+ "min_ollama_version": "0.10.0",
},
{
"name": "qwen3-embedding:8b",
@@ -70,6 +92,7 @@
"size_estimate": "6.0 GB",
"dim": 4096,
"badge": "quality",
+ "min_ollama_version": "0.10.0",
},
{
"name": "qwen3-embedding:0.6b",
@@ -77,6 +100,7 @@
"size_estimate": "494 MB",
"dim": 1024,
"badge": "fast",
+ "min_ollama_version": "0.10.0",
},
{
"name": "embeddinggemma",
@@ -112,6 +136,22 @@
]
+def parse_version(version_str: str | None) -> tuple[int, ...]:
+ """Parse a version string like '0.10.0' into a tuple for comparison."""
+ if not version_str or not isinstance(version_str, str):
+ return (0, 0, 0)
+ # Extract just the numeric parts (handles versions like "0.10.0-rc1")
+ match = re.match(r"(\d+)\.(\d+)\.(\d+)", version_str)
+ if match:
+ return tuple(int(x) for x in match.groups())
+ return (0, 0, 0)
+
+
+def version_gte(version: str | None, min_version: str | None) -> bool:
+ """Check if version >= min_version."""
+ return parse_version(version) >= parse_version(min_version)
+
+
def output_json(success: bool, data: Any = None, error: str | None = None) -> None:
"""Output JSON result to stdout and exit."""
result = {"success": success}
@@ -145,6 +185,14 @@ def fetch_ollama_api(base_url: str, endpoint: str, timeout: int = 5) -> dict | N
return None
+def get_ollama_version(base_url: str) -> str | None:
+ """Get the Ollama server version."""
+ result = fetch_ollama_api(base_url, "api/version")
+ if result:
+ return result.get("version")
+ return None
+
+
def is_embedding_model(model_name: str) -> bool:
"""Check if a model name suggests it's an embedding model."""
name_lower = model_name.lower()
@@ -192,6 +240,19 @@ def get_embedding_description(model_name: str) -> str:
return "Embedding model"
+def get_model_min_version(model_name: str) -> str | None:
+ """Get the minimum Ollama version required for a model."""
+ name_lower = model_name.lower()
+
+ # Sort keys by length descending to match more specific names first
+ # e.g., "qwen3-embedding:8b" before "qwen3-embedding"
+ for known_model in sorted(KNOWN_EMBEDDING_MODELS.keys(), key=len, reverse=True):
+ if known_model in name_lower:
+ return KNOWN_EMBEDDING_MODELS[known_model].get("min_version")
+
+ return None
+
+
def cmd_check_status(args) -> None:
"""Check if Ollama is running and accessible."""
base_url = args.base_url or DEFAULT_OLLAMA_URL
@@ -200,12 +261,18 @@ def cmd_check_status(args) -> None:
result = fetch_ollama_api(base_url, "api/version")
if result:
+ version = result.get("version", "unknown")
output_json(
True,
data={
"running": True,
"url": base_url,
- "version": result.get("version", "unknown"),
+ "version": version,
+ "supports_new_models": version_gte(
+ version, MIN_OLLAMA_VERSION_FOR_NEW_MODELS
+ )
+ if version != "unknown"
+ else None,
},
)
else:
@@ -319,6 +386,9 @@ def cmd_get_recommended_models(args) -> None:
"""Get recommended embedding models with install status."""
base_url = args.base_url or DEFAULT_OLLAMA_URL
+ # Get Ollama version for compatibility checking
+ ollama_version = get_ollama_version(base_url)
+
# Get currently installed models
result = fetch_ollama_api(base_url, "api/tags")
installed_names = set()
@@ -330,17 +400,30 @@ def cmd_get_recommended_models(args) -> None:
installed_names.add(name)
installed_names.add(base_name)
- # Build recommended list with install status
+ # Build recommended list with install status and compatibility
recommended = []
for model in RECOMMENDED_EMBEDDING_MODELS:
name = model["name"]
base_name = name.split(":")[0] if ":" in name else name
is_installed = name in installed_names or base_name in installed_names
+ # Check version compatibility
+ min_version = model.get("min_ollama_version")
+ is_compatible = True
+ compatibility_note = None
+ if min_version and ollama_version:
+ is_compatible = version_gte(ollama_version, min_version)
+ if not is_compatible:
+ compatibility_note = f"Requires Ollama {min_version}+"
+ elif min_version and not ollama_version:
+ compatibility_note = "Version compatibility could not be verified"
+
recommended.append(
{
**model,
"installed": is_installed,
+ "compatible": is_compatible,
+ "compatibility_note": compatibility_note,
}
)
@@ -350,6 +433,7 @@ def cmd_get_recommended_models(args) -> None:
"recommended": recommended,
"count": len(recommended),
"url": base_url,
+ "ollama_version": ollama_version,
},
)
@@ -363,6 +447,19 @@ def cmd_pull_model(args) -> None:
output_error("Model name is required")
return
+ # Check Ollama version compatibility before attempting pull
+ ollama_version = get_ollama_version(base_url)
+ min_version = get_model_min_version(model_name)
+
+ if min_version and ollama_version:
+ if not version_gte(ollama_version, min_version):
+ output_error(
+ f"Model '{model_name}' requires Ollama {min_version} or newer. "
+ f"Your version is {ollama_version}. "
+ f"Please upgrade Ollama: https://ollama.com/download"
+ )
+ return
+
try:
url = f"{base_url.rstrip('/')}/api/pull"
data = json.dumps({"name": model_name}).encode("utf-8")
@@ -376,6 +473,22 @@ def cmd_pull_model(args) -> None:
try:
progress = json.loads(line.decode("utf-8"))
+ # Check for error in the streaming response
+ # This handles cases like "requires newer version of Ollama"
+ if "error" in progress:
+ error_msg = progress["error"]
+ # Clean up the error message (remove extra whitespace/newlines)
+ error_msg = " ".join(error_msg.split())
+ # Check if it's a version-related error
+ if "newer version" in error_msg.lower():
+ error_msg = (
+ f"Model '{model_name}' requires a newer version of Ollama. "
+ f"Your version: {ollama_version or 'unknown'}. "
+ f"Please upgrade: https://ollama.com/download"
+ )
+ output_error(error_msg)
+ return
+
# Emit progress as NDJSON to stderr for main process to parse
if "completed" in progress and "total" in progress:
print(
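For context on how the version helpers introduced above interact, here is a minimal, self-contained sketch; the server versions are made up, and note that `parse_version` drops pre-release suffixes, so `0.10.0-rc1` compares equal to `0.10.0`:

```python
import re


def parse_version(version_str: str | None) -> tuple[int, ...]:
    """Parse '0.10.0' (or '0.10.0-rc1') into a comparable tuple."""
    if not version_str or not isinstance(version_str, str):
        return (0, 0, 0)
    match = re.match(r"(\d+)\.(\d+)\.(\d+)", version_str)
    return tuple(int(x) for x in match.groups()) if match else (0, 0, 0)


def version_gte(version: str | None, min_version: str | None) -> bool:
    return parse_version(version) >= parse_version(min_version)


# Hypothetical server versions checked against the qwen3-embedding minimum (0.10.0)
assert version_gte("0.12.3", "0.10.0")        # newer server: pull allowed
assert version_gte("0.10.0-rc1", "0.10.0")    # pre-release suffix is ignored
assert not version_gte("0.9.5", "0.10.0")     # too old: pull is refused with an upgrade hint
assert not version_gte(None, "0.10.0")        # unknown version parses as (0, 0, 0)
```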
diff --git a/apps/backend/phase_config.py b/apps/backend/phase_config.py
index f7b85cdee5..3fc9ba74ef 100644
--- a/apps/backend/phase_config.py
+++ b/apps/backend/phase_config.py
@@ -7,6 +7,7 @@
"""
import json
+import os
from pathlib import Path
from typing import Literal, TypedDict
@@ -46,10 +47,10 @@
"complexity_assessment": "medium",
}
-# Default phase configuration (matches UI defaults)
+# Default phase configuration (fallback, matches 'Balanced' profile)
DEFAULT_PHASE_MODELS: dict[str, str] = {
"spec": "sonnet",
- "planning": "opus",
+ "planning": "sonnet", # Changed from "opus" (fix #433)
"coding": "sonnet",
"qa": "sonnet",
}
@@ -94,17 +95,34 @@ def resolve_model_id(model: str) -> str:
Resolve a model shorthand (haiku, sonnet, opus) to a full model ID.
If the model is already a full ID, return it unchanged.
+ Priority:
+ 1. Environment variable override (from API Profile)
+ 2. Hardcoded MODEL_ID_MAP
+ 3. Pass through unchanged (assume full model ID)
+
Args:
model: Model shorthand or full ID
Returns:
Full Claude model ID
"""
- # Check if it's a shorthand
+ # Check for environment variable override (from API Profile custom model mappings)
if model in MODEL_ID_MAP:
+ env_var_map = {
+ "haiku": "ANTHROPIC_DEFAULT_HAIKU_MODEL",
+ "sonnet": "ANTHROPIC_DEFAULT_SONNET_MODEL",
+ "opus": "ANTHROPIC_DEFAULT_OPUS_MODEL",
+ }
+ env_var = env_var_map.get(model)
+ if env_var:
+ env_value = os.environ.get(env_var)
+ if env_value:
+ return env_value
+
+ # Fall back to hardcoded mapping
return MODEL_ID_MAP[model]
- # Already a full model ID
+ # Already a full model ID or unknown shorthand
return model
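Below is a condensed, illustrative sketch of the resolution order described in the docstring; the mapped model IDs are placeholders, not the module's real `MODEL_ID_MAP` values:

```python
import os

MODEL_ID_MAP = {"sonnet": "placeholder-sonnet-model-id"}  # stand-in for the real map


def resolve_model_id(model: str) -> str:
    env_var_map = {
        "haiku": "ANTHROPIC_DEFAULT_HAIKU_MODEL",
        "sonnet": "ANTHROPIC_DEFAULT_SONNET_MODEL",
        "opus": "ANTHROPIC_DEFAULT_OPUS_MODEL",
    }
    if model in MODEL_ID_MAP:
        env_value = os.environ.get(env_var_map.get(model, ""), "")
        return env_value or MODEL_ID_MAP[model]  # override first, then hardcoded map
    return model  # full IDs and unknown shorthands pass through unchanged


# 1. No override set: falls back to the hardcoded mapping
os.environ.pop("ANTHROPIC_DEFAULT_SONNET_MODEL", None)
assert resolve_model_id("sonnet") == "placeholder-sonnet-model-id"

# 2. API Profile override wins (hypothetical custom mapping)
os.environ["ANTHROPIC_DEFAULT_SONNET_MODEL"] = "my-proxy/sonnet-variant"
assert resolve_model_id("sonnet") == "my-proxy/sonnet-variant"

# 3. Anything not in the map is returned as-is
assert resolve_model_id("some-full-model-id") == "some-full-model-id"
```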
diff --git a/apps/backend/project/command_registry/languages.py b/apps/backend/project/command_registry/languages.py
index cd10b0d6b1..e91787eb4e 100644
--- a/apps/backend/project/command_registry/languages.py
+++ b/apps/backend/project/command_registry/languages.py
@@ -173,12 +173,16 @@
"zig",
},
"dart": {
+ # Core Dart CLI (modern unified tool)
"dart",
+ "pub",
+ # Flutter CLI (included in Dart language for SDK detection)
+ "flutter",
+ # Legacy commands (deprecated but may exist in older projects)
"dart2js",
"dartanalyzer",
"dartdoc",
"dartfmt",
- "pub",
},
}
diff --git a/apps/backend/project/command_registry/package_managers.py b/apps/backend/project/command_registry/package_managers.py
index 46b30b3712..bf6c1d978a 100644
--- a/apps/backend/project/command_registry/package_managers.py
+++ b/apps/backend/project/command_registry/package_managers.py
@@ -33,6 +33,9 @@
"brew": {"brew"},
"apt": {"apt", "apt-get", "dpkg"},
"nix": {"nix", "nix-shell", "nix-build", "nix-env"},
+ # Dart/Flutter package managers
+ "pub": {"pub", "dart"},
+ "melos": {"melos", "dart", "flutter"},
}
diff --git a/apps/backend/project/command_registry/version_managers.py b/apps/backend/project/command_registry/version_managers.py
index b4356d0449..04e8e3925b 100644
--- a/apps/backend/project/command_registry/version_managers.py
+++ b/apps/backend/project/command_registry/version_managers.py
@@ -23,6 +23,8 @@
"rustup": {"rustup"},
"sdkman": {"sdk"},
"jabba": {"jabba"},
+ # Dart/Flutter version managers
+ "fvm": {"fvm", "flutter"},
}
diff --git a/apps/backend/project/stack_detector.py b/apps/backend/project/stack_detector.py
index 051c685c93..0fa67c29b3 100644
--- a/apps/backend/project/stack_detector.py
+++ b/apps/backend/project/stack_detector.py
@@ -164,6 +164,12 @@ def detect_package_managers(self) -> None:
if self.parser.file_exists("build.gradle", "build.gradle.kts"):
self.stack.package_managers.append("gradle")
+ # Dart/Flutter package managers
+ if self.parser.file_exists("pubspec.yaml", "pubspec.lock"):
+ self.stack.package_managers.append("pub")
+ if self.parser.file_exists("melos.yaml"):
+ self.stack.package_managers.append("melos")
+
def detect_databases(self) -> None:
"""Detect databases from config files and dependencies."""
# Check for database config files
@@ -358,3 +364,6 @@ def detect_version_managers(self) -> None:
self.stack.version_managers.append("rbenv")
if self.parser.file_exists("rust-toolchain.toml", "rust-toolchain"):
self.stack.version_managers.append("rustup")
+ # Flutter Version Manager
+ if self.parser.file_exists(".fvm", ".fvmrc", "fvm_config.json"):
+ self.stack.version_managers.append("fvm")
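Taken together with the registry entries above, a sketch of what these checks would detect for a hypothetical Flutter monorepo that uses Melos and FVM:

```python
# Marker files present in the hypothetical project
project_files = {"pubspec.yaml", "pubspec.lock", "melos.yaml", ".fvmrc"}


def file_exists(*names: str) -> bool:
    return any(name in project_files for name in names)


package_managers: list[str] = []
version_managers: list[str] = []

if file_exists("pubspec.yaml", "pubspec.lock"):
    package_managers.append("pub")
if file_exists("melos.yaml"):
    package_managers.append("melos")
if file_exists(".fvm", ".fvmrc", "fvm_config.json"):
    version_managers.append("fvm")

assert package_managers == ["pub", "melos"]
assert version_managers == ["fvm"]
```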
diff --git a/apps/backend/prompts/coder.md b/apps/backend/prompts/coder.md
index c9cde7f3c2..8b0acd9ef1 100644
--- a/apps/backend/prompts/coder.md
+++ b/apps/backend/prompts/coder.md
@@ -22,6 +22,68 @@ environment at the start of each prompt in the "YOUR ENVIRONMENT" section. Pay c
---
+## 🚨 CRITICAL: PATH CONFUSION PREVENTION 🚨
+
+**THE #1 BUG IN MONOREPOS: Doubled paths after `cd` commands**
+
+### The Problem
+
+After running `cd ./apps/frontend`, your current directory changes. If you then use paths like `apps/frontend/src/file.ts`, you're creating **doubled paths** like `apps/frontend/apps/frontend/src/file.ts`.
+
+### The Solution: ALWAYS CHECK YOUR CWD
+
+**BEFORE every git command or file operation:**
+
+```bash
+# Step 1: Check where you are
+pwd
+
+# Step 2: Use paths RELATIVE TO CURRENT DIRECTORY
+# If pwd shows: /path/to/project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts
+```
+
+### Examples
+
+**❌ WRONG - Path gets doubled:**
+```bash
+cd ./apps/frontend
+git add apps/frontend/src/file.ts # Looks for apps/frontend/apps/frontend/src/file.ts
+```
+
+**✅ CORRECT - Use relative path from current directory:**
+```bash
+cd ./apps/frontend
+pwd # Shows: /path/to/project/apps/frontend
+git add src/file.ts # Correctly stages apps/frontend/src/file.ts (its path as seen from the repo root)
+```
+
+**✅ ALSO CORRECT - Stay at root, use full relative path:**
+```bash
+# Don't change directory at all
+git add ./apps/frontend/src/file.ts # Works from project root
+```
+
+### Mandatory Pre-Command Check
+
+**Before EVERY git add, git commit, or file operation in a monorepo:**
+
+```bash
+# 1. Where am I?
+pwd
+
+# 2. What files am I targeting?
+ls -la [target-path] # Verify the path exists
+
+# 3. Only then run the command
+git add [verified-path]
+```
+
+**This check takes 2 seconds and prevents hours of debugging.**
+
+---
+
## STEP 1: GET YOUR BEARINGS (MANDATORY)
First, check your environment. The prompt should tell you your working directory and spec location.
@@ -358,6 +420,20 @@ In your response, acknowledge the checklist:
## STEP 6: IMPLEMENT THE SUBTASK
+### Verify Your Location FIRST
+
+**MANDATORY: Before implementing anything, confirm where you are:**
+
+```bash
+# This should match the "Working Directory" in YOUR ENVIRONMENT section above
+pwd
+```
+
+If you change directories during implementation (e.g., `cd apps/frontend`), remember:
+- Your file paths must be RELATIVE TO YOUR NEW LOCATION
+- Before any git operation, run `pwd` again to verify your location
+- See the "PATH CONFUSION PREVENTION" section above for examples
+
### Mark as In Progress
Update `implementation_plan.json`:
@@ -618,6 +694,31 @@ After successful verification, update the subtask:
## STEP 9: COMMIT YOUR PROGRESS
+### Path Verification (MANDATORY FIRST STEP)
+
+**🚨 BEFORE running ANY git commands, verify your current directory:**
+
+```bash
+# Step 1: Where am I?
+pwd
+
+# Step 2: What files do I want to commit?
+# If you changed to a subdirectory (e.g., cd apps/frontend),
+# you need to use paths RELATIVE TO THAT DIRECTORY, not from project root
+
+# Step 3: Verify paths exist
+ls -la [path-to-files] # Make sure the path is correct from your current location
+
+# Example in a monorepo:
+# If pwd shows: /project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts (this would look for apps/frontend/apps/frontend/src/file.ts)
+```
+
+**CRITICAL RULE:** If you're in a subdirectory, either:
+- **Option A:** Return to project root: `cd [back to working directory]`
+- **Option B:** Use paths relative to your CURRENT directory (check with `pwd`)
+
### Secret Scanning (Automatic)
The system **automatically scans for secrets** before every commit. If secrets are detected, the commit will be blocked and you'll receive detailed instructions on how to fix it.
@@ -634,7 +735,7 @@ The system **automatically scans for secrets** before every commit. If secrets a
api_key = os.environ.get("API_KEY")
```
3. **Update .env.example** - Add placeholder for the new variable
-4. **Re-stage and retry** - `git add . && git commit ...`
+4. **Re-stage and retry** - `git add . ':!.auto-claude' && git commit ...`
**If it's a false positive:**
- Add the file pattern to `.secretsignore` in the project root
@@ -643,7 +744,17 @@ The system **automatically scans for secrets** before every commit. If secrets a
### Create the Commit
```bash
-git add .
+# FIRST: Make sure you're in the working directory root (check YOUR ENVIRONMENT section at top)
+pwd # Should match your working directory
+
+# Add all files EXCEPT .auto-claude directory (spec files should never be committed)
+git add . ':!.auto-claude'
+
+# If git add fails with "pathspec did not match", you have a path problem:
+# 1. Run pwd to see where you are
+# 2. Run git status to see what git sees
+# 3. Adjust your paths accordingly
+
git commit -m "auto-claude: Complete [subtask-id] - [subtask description]
- Files modified: [list]
@@ -651,6 +762,9 @@ git commit -m "auto-claude: Complete [subtask-id] - [subtask description]
- Phase progress: [X]/[Y] subtasks complete"
```
+**CRITICAL**: The `:!.auto-claude` pathspec exclusion ensures spec files are NEVER committed.
+These are internal tracking files that must stay local.
+
### DO NOT Push to Remote
**IMPORTANT**: Do NOT run `git push`. All work stays local until the user reviews and approves.
@@ -956,6 +1070,17 @@ Prepare → Test (small batch) → Execute (full) → Cleanup
- Clean, working state
- **Secret scan must pass before commit**
+### Git Configuration - NEVER MODIFY
+**CRITICAL**: You MUST NOT modify git user configuration. Never run:
+- `git config user.name`
+- `git config user.email`
+- `git config --local user.*`
+- `git config --global user.*`
+
+The repository inherits the user's configured git identity. Creating "Test User" or
+any other fake identity breaks attribution and causes serious issues. If you need
+to commit changes, use the existing git identity - do NOT set a new one.
+
### The Golden Rule
**FIX BUGS NOW.** The next session has no memory.
diff --git a/apps/backend/prompts/github/pr_codebase_fit_agent.md b/apps/backend/prompts/github/pr_codebase_fit_agent.md
index f9e14e1e3f..9a14b56dbc 100644
--- a/apps/backend/prompts/github/pr_codebase_fit_agent.md
+++ b/apps/backend/prompts/github/pr_codebase_fit_agent.md
@@ -6,6 +6,23 @@ You are a focused codebase fit review agent. You have been spawned by the orches
Ensure new code integrates well with the existing codebase. Check for consistency with project conventions, reuse of existing utilities, and architectural alignment. Focus ONLY on codebase fit - not security, logic correctness, or general quality.
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Codebase fit issues in changed code** - New code not following project patterns
+2. **Missed reuse opportunities** - "Existing `utils.ts` has a helper for this"
+3. **Inconsistent with PR's own changes** - "You used `camelCase` here but `snake_case` elsewhere in the PR"
+4. **Breaking conventions in touched areas** - "Your change deviates from the pattern in this file"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing inconsistencies** - Old code that doesn't follow patterns
+2. **Unrelated suggestions** - Don't suggest patterns for code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your new component doesn't follow the existing pattern in `components/`" - GOOD
+- ✅ "Consider using existing `formatDate()` helper instead of new implementation" - GOOD
+- ❌ "The old `legacy/` folder uses different naming conventions" - BAD (pre-existing)
+
## Codebase Fit Focus Areas
### 1. Naming Conventions
diff --git a/apps/backend/prompts/github/pr_finding_validator.md b/apps/backend/prompts/github/pr_finding_validator.md
index b054344ea9..6421e37132 100644
--- a/apps/backend/prompts/github/pr_finding_validator.md
+++ b/apps/backend/prompts/github/pr_finding_validator.md
@@ -1,16 +1,37 @@
# Finding Validator Agent
-You are a finding re-investigator. For each unresolved finding from a previous PR review, you must actively investigate whether it is a REAL issue or a FALSE POSITIVE.
+You are a finding re-investigator using EVIDENCE-BASED VALIDATION. For each unresolved finding from a previous PR review, you must actively investigate whether it is a REAL issue or a FALSE POSITIVE.
+
+**Core Principle: Evidence, not confidence scores.** Either you can prove the issue exists with actual code, or you can't. There is no middle ground.
Your job is to prevent false positives from persisting indefinitely by actually reading the code and verifying the issue exists.
+## CRITICAL: Check PR Scope First
+
+**Before investigating any finding, verify it's within THIS PR's scope:**
+
+1. **Check if the file is in the PR's changed files list** - If not, likely out-of-scope
+2. **Check if the line number exists** - If the finding cites line 710 but the file only has 600 lines, it's hallucinated
+3. **Check for PR references in commit messages** - Commits like `fix: something (#584)` are from OTHER PRs
+
+**Dismiss findings as `dismissed_false_positive` if:**
+- The finding references a file NOT in the PR's changed files list AND is not about impact on that file
+- The line number doesn't exist in the file (hallucinated)
+- The finding is about code from a merged branch commit (not this PR's work)
+
+**Keep findings valid if they're about:**
+- Issues in code the PR actually changed
+- Impact of PR changes on other code (e.g., "this change breaks callers in X")
+- Missing updates to related code (e.g., "you updated A but forgot B")
+
## Your Mission
For each finding you receive:
-1. **READ** the actual code at the file/line location using the Read tool
-2. **ANALYZE** whether the described issue actually exists in the code
-3. **PROVIDE** concrete code evidence for your conclusion
-4. **RETURN** validation status with evidence
+1. **VERIFY SCOPE** - Is this file/line actually part of this PR?
+2. **READ** the actual code at the file/line location using the Read tool
+3. **ANALYZE** whether the described issue actually exists in the code
+4. **PROVIDE** concrete code evidence - the actual code that proves or disproves the issue
+5. **RETURN** validation status with evidence (binary decision based on what the code shows)
## Investigation Process
@@ -24,45 +45,61 @@ Read the file: {finding.file}
Focus on lines around: {finding.line}
```
-### Step 2: Analyze with Fresh Eyes
+### Step 2: Analyze with Fresh Eyes - NEVER ASSUME
+
+**CRITICAL: Do NOT assume the original finding is correct.** The original reviewer may have:
+- Hallucinated line numbers that don't exist
+- Misread or misunderstood the code
+- Missed validation/sanitization in callers or surrounding code
+- Made assumptions without actually reading the implementation
+- Confused similar-looking code patterns
+
+**You MUST actively verify by asking:**
+- Does the code at this exact line ACTUALLY have this issue?
+- Did I READ the actual implementation, not just the function name?
+- Is there validation/sanitization BEFORE this code is reached?
+- Is there framework protection I'm not accounting for?
+- Does this line number even EXIST in the file?
-**Do NOT assume the original finding is correct.** Ask yourself:
-- Does the code ACTUALLY have this issue?
-- Is the described vulnerability/bug/problem present?
-- Could the original reviewer have misunderstood the code?
-- Is there context that makes this NOT an issue (e.g., sanitization elsewhere)?
+**NEVER:**
+- Trust the finding description without reading the code
+- Assume a function is vulnerable based on its name
+- Skip checking surrounding context (±20 lines minimum)
+- Confirm a finding just because "it sounds plausible"
-Be skeptical. The original review may have hallucinated this finding.
+Be HIGHLY skeptical. AI reviews frequently produce false positives. Your job is to catch them.
### Step 3: Document Evidence
You MUST provide concrete evidence:
-- **Exact code snippet** you examined (copy-paste from the file)
+- **Exact code snippet** you examined (copy-paste from the file) - this is the PROOF
- **Line numbers** where you found (or didn't find) the issue
-- **Your analysis** of whether the issue exists
-- **Confidence level** (0.0-1.0) in your conclusion
+- **Your analysis** connecting the code to your conclusion
+- **Verification flag** - did this code actually exist at the specified location?
## Validation Statuses
### `confirmed_valid`
-Use when you verify the issue IS real:
+Use when your code evidence PROVES the issue IS real:
- The problematic code pattern exists exactly as described
-- The vulnerability/bug is present and exploitable
+- You can point to the specific lines showing the vulnerability/bug
- The code quality issue genuinely impacts the codebase
+- **Key question**: Does your code_evidence field contain the actual problematic code?
### `dismissed_false_positive`
-Use when you verify the issue does NOT exist:
-- The described code pattern is not actually present
-- The original finding misunderstood the code
-- There is mitigating code that prevents the issue (e.g., input validation elsewhere)
-- The finding was based on incorrect assumptions
+Use when your code evidence PROVES the issue does NOT exist:
+- The described code pattern is not actually present (code_evidence shows different code)
+- There is mitigating code that prevents the issue (code_evidence shows the mitigation)
+- The finding was based on incorrect assumptions (code_evidence shows reality)
+- The line number doesn't exist or contains different code than claimed
+- **Key question**: Does your code_evidence field show code that disproves the original finding?
### `needs_human_review`
-Use when you cannot determine with confidence:
-- The issue requires runtime analysis to verify
+Use when you CANNOT find definitive evidence either way:
+- The issue requires runtime analysis to verify (static code doesn't prove/disprove)
- The code is too complex to analyze statically
-- You have conflicting evidence
-- Your confidence is below 0.70
+- You found the code but can't determine if it's actually a problem
+- **Key question**: Is your code_evidence inconclusive?
## Output Format
@@ -75,7 +112,7 @@ Return one result per finding:
"code_evidence": "const query = `SELECT * FROM users WHERE id = ${userId}`;",
"line_range": [45, 45],
"explanation": "SQL injection vulnerability confirmed. User input 'userId' is directly interpolated into the SQL query at line 45 without any sanitization. The query is executed via db.execute() on line 46.",
- "confidence": 0.95
+ "evidence_verified_in_file": true
}
```
@@ -85,8 +122,8 @@ Return one result per finding:
"validation_status": "dismissed_false_positive",
"code_evidence": "function processInput(data: string): string {\n const sanitized = DOMPurify.sanitize(data);\n return sanitized;\n}",
"line_range": [23, 26],
- "explanation": "The original finding claimed XSS vulnerability, but the code uses DOMPurify.sanitize() before output. The input is properly sanitized at line 24 before being returned.",
- "confidence": 0.88
+ "explanation": "The original finding claimed XSS vulnerability, but the code uses DOMPurify.sanitize() before output. The input is properly sanitized at line 24 before being returned. The code evidence proves the issue does NOT exist.",
+ "evidence_verified_in_file": true
}
```
@@ -96,38 +133,56 @@ Return one result per finding:
"validation_status": "needs_human_review",
"code_evidence": "async function handleRequest(req) {\n // Complex async logic...\n}",
"line_range": [100, 150],
- "explanation": "The original finding claims a race condition, but verifying this requires understanding the runtime behavior and concurrency model. Cannot determine statically.",
- "confidence": 0.45
+ "explanation": "The original finding claims a race condition, but verifying this requires understanding the runtime behavior and concurrency model. The static code doesn't provide definitive evidence either way.",
+ "evidence_verified_in_file": true
}
```
-## Confidence Guidelines
+```json
+{
+ "finding_id": "HALLUC-004",
+ "validation_status": "dismissed_false_positive",
+ "code_evidence": "// Line 710 does not exist - file only has 600 lines",
+ "line_range": [600, 600],
+ "explanation": "The original finding claimed an issue at line 710, but the file only has 600 lines. This is a hallucinated finding - the code doesn't exist.",
+ "evidence_verified_in_file": false
+}
+```
+
+## Evidence Guidelines
-Rate your confidence based on how certain you are:
+Validation is binary based on what the code evidence shows:
-| Confidence | Meaning |
-|------------|---------|
-| 0.90-1.00 | Definitive evidence - code clearly shows the issue exists/doesn't exist |
-| 0.80-0.89 | Strong evidence - high confidence with minor uncertainty |
-| 0.70-0.79 | Moderate evidence - likely correct but some ambiguity |
-| 0.50-0.69 | Uncertain - use `needs_human_review` |
-| Below 0.50 | Insufficient evidence - must use `needs_human_review` |
+| Scenario | Status | Evidence Required |
+|----------|--------|-------------------|
+| Code shows the exact problem claimed | `confirmed_valid` | Problematic code snippet |
+| Code shows issue doesn't exist or is mitigated | `dismissed_false_positive` | Code proving issue is absent |
+| Code couldn't be found (hallucinated line/file) | `dismissed_false_positive` | Note that code doesn't exist |
+| Code found but can't prove/disprove statically | `needs_human_review` | The inconclusive code |
-**Minimum thresholds:**
-- To confirm as `confirmed_valid`: confidence >= 0.70
-- To dismiss as `dismissed_false_positive`: confidence >= 0.80 (higher bar for dismissal)
-- If below thresholds: must use `needs_human_review`
+**Decision rules:**
+- If `code_evidence` contains problematic code → `confirmed_valid`
+- If `code_evidence` proves issue doesn't exist → `dismissed_false_positive`
+- If `evidence_verified_in_file` is false → `dismissed_false_positive` (hallucinated finding)
+- If you can't determine from the code → `needs_human_review`
## Common False Positive Patterns
Watch for these patterns that often indicate false positives:
-1. **Sanitization elsewhere**: Input is validated/sanitized before reaching the flagged code
-2. **Internal-only code**: Code only handles trusted internal data, not user input
-3. **Framework protection**: Framework provides automatic protection (e.g., ORM parameterization)
-4. **Dead code**: The flagged code is never executed in the current codebase
-5. **Test code**: The issue is in test files where it's acceptable
-6. **Misread syntax**: Original reviewer misunderstood the language syntax
+1. **Non-existent line number**: The line number cited doesn't exist or is beyond EOF - hallucinated finding
+2. **Merged branch code**: Finding is about code from a commit like `fix: something (#584)` - another PR
+3. **Pre-existing issue, not impact**: Finding flags old bug in untouched code without showing how PR changes relate
+4. **Sanitization elsewhere**: Input is validated/sanitized before reaching the flagged code
+5. **Internal-only code**: Code only handles trusted internal data, not user input
+6. **Framework protection**: Framework provides automatic protection (e.g., ORM parameterization)
+7. **Dead code**: The flagged code is never executed in the current codebase
+8. **Test code**: The issue is in test files where it's acceptable
+9. **Misread syntax**: Original reviewer misunderstood the language syntax
+
+**Note**: Findings about files outside the PR's changed list are NOT automatically false positives if they're about:
+- Impact of PR changes on that file (e.g., "your change breaks X")
+- Missing related updates (e.g., "you forgot to update Y")
## Common Valid Issue Patterns
@@ -144,15 +199,16 @@ These patterns often confirm the issue is real:
1. **ALWAYS read the actual code** - Never rely on memory or the original finding description
2. **ALWAYS provide code_evidence** - No empty strings. Quote the actual code.
3. **Be skeptical of original findings** - Many AI reviews produce false positives
-4. **Higher bar for dismissal** - Need 0.80 confidence to dismiss (vs 0.70 to confirm)
-5. **When uncertain, escalate** - Use `needs_human_review` rather than guessing
+4. **Evidence is binary** - The code either shows the problem or it doesn't
+5. **When evidence is inconclusive, escalate** - Use `needs_human_review` rather than guessing
6. **Look for mitigations** - Check surrounding code for sanitization/validation
7. **Check the full context** - Read ±20 lines, not just the flagged line
+8. **Verify code exists** - Set `evidence_verified_in_file` to false if the code/line doesn't exist
## Anti-Patterns to Avoid
-- **Trusting the original finding blindly** - Always verify
-- **Dismissing without reading code** - Must provide code_evidence
-- **Low confidence dismissals** - Needs 0.80+ confidence to dismiss
-- **Vague explanations** - Be specific about what you found
+- **Trusting the original finding blindly** - Always verify with actual code
+- **Dismissing without reading code** - Must provide code_evidence that proves your point
+- **Vague explanations** - Be specific about what the code shows and why it proves/disproves the issue
- **Missing line numbers** - Always include line_range
+- **Speculative conclusions** - Only conclude what the code evidence actually proves
diff --git a/apps/backend/prompts/github/pr_followup.md b/apps/backend/prompts/github/pr_followup.md
index 1e2fe04efb..423463f05b 100644
--- a/apps/backend/prompts/github/pr_followup.md
+++ b/apps/backend/prompts/github/pr_followup.md
@@ -71,10 +71,12 @@ Review the diff since the last review for NEW issues:
- Regressions that break previously working code
- Missing error handling in new code paths
-**Apply the 80% confidence threshold:**
-- Only report issues you're confident about
+**NEVER ASSUME - ALWAYS VERIFY:**
+- Actually READ the code before reporting any finding
+- Verify the issue exists at the exact line you cite
+- Check for validation/mitigation in surrounding code
- Don't re-report issues from the previous review
-- Focus on genuinely new problems
+- Focus on genuinely new problems with code EVIDENCE
### Phase 3: Comment Review
@@ -137,11 +139,11 @@ Return a JSON object with this structure:
"id": "new-finding-1",
"severity": "medium",
"category": "security",
- "confidence": 0.85,
"title": "New hardcoded API key in config",
"description": "A new API key was added in config.ts line 45 without using environment variables.",
"file": "src/config.ts",
"line": 45,
+ "evidence": "const API_KEY = 'sk-prod-abc123xyz789';",
"suggested_fix": "Move to environment variable: process.env.EXTERNAL_API_KEY"
}
],
@@ -175,11 +177,11 @@ Same format as initial review findings:
- **id**: Unique identifier for new finding
- **severity**: `critical` | `high` | `medium` | `low`
- **category**: `security` | `quality` | `logic` | `test` | `docs` | `pattern` | `performance`
-- **confidence**: Float 0.80-1.0
- **title**: Short summary (max 80 chars)
- **description**: Detailed explanation
- **file**: Relative file path
- **line**: Line number
+- **evidence**: **REQUIRED** - Actual code snippet proving the issue exists
- **suggested_fix**: How to resolve
### verdict
diff --git a/apps/backend/prompts/github/pr_followup_newcode_agent.md b/apps/backend/prompts/github/pr_followup_newcode_agent.md
index c35e84f876..5021113b97 100644
--- a/apps/backend/prompts/github/pr_followup_newcode_agent.md
+++ b/apps/backend/prompts/github/pr_followup_newcode_agent.md
@@ -11,6 +11,23 @@ Review the incremental diff for:
4. Potential regressions
5. Incomplete implementations
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Issues in changed code** - Problems in files/lines actually modified by this PR
+2. **Impact on unchanged code** - "This change breaks callers in `other_file.ts`"
+3. **Missing related changes** - "Similar pattern in `utils.ts` wasn't updated"
+4. **Incomplete implementations** - "New field added but not handled in serializer"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing bugs** - Old bugs in code this PR didn't touch
+2. **Code from merged branches** - Commits with PR references like `(#584)` are from other PRs
+3. **Unrelated improvements** - Don't suggest refactoring untouched code
+
+**Key distinction:**
+- ✅ "Your change breaks the caller in `auth.ts`" - GOOD (impact analysis)
+- ❌ "The old code in `legacy.ts` has a bug" - BAD (pre-existing, not this PR)
+
## Focus Areas
Since this is a follow-up review, focus on:
@@ -74,15 +91,47 @@ Since this is a follow-up review, focus on:
- Minor optimizations
- Documentation gaps
-## Confidence Scoring
+## NEVER ASSUME - ALWAYS VERIFY
+
+**Before reporting ANY new finding:**
+
+1. **NEVER assume code is vulnerable** - Read the actual implementation
+2. **NEVER assume validation is missing** - Check callers and surrounding code
+3. **NEVER assume based on function names** - `unsafeQuery()` might actually be safe
+4. **NEVER report without reading the code** - Verify the issue exists at the exact line
+
+**You MUST:**
+- Actually READ the code at the file/line you cite
+- Verify there's no sanitization/validation before this code
+- Check for framework protections you might miss
+- Provide the actual code snippet as evidence
+
+### Verify Before Reporting "Missing" Safeguards
+
+For findings claiming something is **missing** (no fallback, no validation, no error handling):
+
+**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?"
+
+- Read the **complete function/method** containing the issue, not just the flagged line
+- Check for guards, fallbacks, or defensive code that may appear later in the function
+- Look for comments indicating intentional design choices
+- If uncertain, use the Read/Grep tools to confirm
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "The code defaults to 'main' without checking if it exists"
+✅ **Strong**: "I read the complete `_detect_target_branch()` function. There is no existence check before the default return."
+
+**Only report if you can confidently say**: "I verified the complete scope and the safeguard does not exist."
+
+## Evidence Requirements
-Rate confidence (0.0-1.0) based on:
-- **>0.9**: Obvious, verifiable issue
-- **0.8-0.9**: High confidence with clear evidence
-- **0.7-0.8**: Likely issue but some uncertainty
-- **<0.7**: Possible issue, needs verification
+Every finding MUST include an `evidence` field with:
+- The actual problematic code copy-pasted from the diff
+- The specific line numbers where the issue exists
+- Proof that the issue is real, not speculative
-Only report findings with confidence >0.7.
+**No evidence = No finding**
## Output Format
@@ -99,7 +148,7 @@ Return findings in this structure:
"description": "The new login validation query concatenates user input directly into the SQL string without sanitization.",
"category": "security",
"severity": "critical",
- "confidence": 0.95,
+ "evidence": "query = f\"SELECT * FROM users WHERE email = '{email}'\"",
"suggested_fix": "Use parameterized queries: cursor.execute('SELECT * FROM users WHERE email = ?', (email,))",
"fixable": true,
"source_agent": "new-code-reviewer",
@@ -113,7 +162,7 @@ Return findings in this structure:
"description": "The fix for LOGIC-003 removed a null check that was protecting against undefined input. Now input.data can be null.",
"category": "regression",
"severity": "high",
- "confidence": 0.88,
+ "evidence": "result = input.data.process() # input.data can be null, was previously: if input and input.data:",
"suggested_fix": "Restore null check: if (input && input.data) { ... }",
"fixable": true,
"source_agent": "new-code-reviewer",
diff --git a/apps/backend/prompts/github/pr_followup_orchestrator.md b/apps/backend/prompts/github/pr_followup_orchestrator.md
index da2ee6b97a..4e714df4c3 100644
--- a/apps/backend/prompts/github/pr_followup_orchestrator.md
+++ b/apps/backend/prompts/github/pr_followup_orchestrator.md
@@ -9,6 +9,40 @@ Perform a focused, efficient follow-up review by:
2. Delegating to specialized agents based on what needs verification
3. Synthesizing findings into a final merge verdict
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Issues in changed code** - Problems in files/lines actually modified by this PR
+2. **Impact on unchanged code** - "You changed X but forgot to update Y that depends on it"
+3. **Missing related changes** - "This pattern also exists in Z, did you mean to update it too?"
+4. **Breaking changes** - "This change breaks callers in other files"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing issues in unchanged code** - If old code has a bug but this PR didn't touch it, don't flag it
+2. **Code from merged branches** - Commits with PR references like `(#584)` are from OTHER already-reviewed PRs
+3. **Unrelated improvements** - Don't suggest refactoring code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your change to `validateUser()` breaks the caller in `auth.ts:45`" - GOOD (impact of PR changes)
+- ✅ "You updated this validation but similar logic in `utils.ts` wasn't updated" - GOOD (incomplete change)
+- ❌ "The existing code in `legacy.ts` has a SQL injection" - BAD (pre-existing issue, not this PR)
+- ❌ "This code from commit `fix: something (#584)` has an issue" - BAD (different PR)
+
+**Why this matters:**
+When authors merge the base branch into their feature branch, the commit range includes commits from other PRs. The context gathering system filters these out, but if any slip through, recognize them as out-of-scope.
+
+## Merge Conflicts
+
+**Check for merge conflicts in the follow-up context.** If `has_merge_conflicts` is `true`:
+
+1. **Report this prominently** - Merge conflicts block the PR from being merged
+2. **Add a CRITICAL finding** with category "merge_conflict" and severity "critical"
+3. **Include in verdict reasoning** - The PR cannot be merged until conflicts are resolved
+4. **This may be NEW since last review** - Base branch may have changed
+
+Note: GitHub's API tells us IF there are conflicts but not WHICH files. The finding should state:
+> "This PR has merge conflicts with the base branch that must be resolved before merging."
+
## Available Specialist Agents
You have access to these specialist agents via the Task tool:
@@ -97,7 +131,21 @@ After all agents complete:
## Verdict Guidelines
+### CRITICAL: CI Status ALWAYS Factors Into Verdict
+
+**CI status is provided in the context and MUST be considered:**
+
+- ❌ **Failing CI = BLOCKED** - If ANY CI checks are failing, verdict MUST be BLOCKED regardless of code quality
+- ⏳ **Pending CI = NEEDS_REVISION** - If CI is still running, verdict cannot be READY_TO_MERGE
+- ⏸️ **Awaiting approval = BLOCKED** - Fork PR workflows awaiting maintainer approval block merge
+- ✅ **All passing = Continue with code analysis** - Only then do code findings determine verdict
+
+**Always mention CI status in your verdict_reasoning.** For example:
+- "BLOCKED: 2 CI checks failing (CodeQL, test-frontend). Fix CI before merge."
+- "READY_TO_MERGE: All CI checks passing and all findings resolved."
+
### READY_TO_MERGE
+- **All CI checks passing** (no failing, no pending)
- All previous findings verified as resolved OR dismissed as false positives
- No CONFIRMED_VALID critical/high issues remaining
- No new critical/high issues
@@ -105,11 +153,13 @@ After all agents complete:
- Contributor questions addressed
### MERGE_WITH_CHANGES
+- **All CI checks passing**
- Previous findings resolved
- Only LOW severity new issues (suggestions)
- Optional polish items can be addressed post-merge
### NEEDS_REVISION (Strict Quality Gates)
+- **CI checks pending** OR
- HIGH or MEDIUM severity findings CONFIRMED_VALID (not dismissed as false positive)
- New HIGH or MEDIUM severity issues introduced
- Important contributor concerns unaddressed
@@ -117,6 +167,8 @@ After all agents complete:
- **Note: Only count findings that passed validation** (dismissed_false_positive findings don't block)
### BLOCKED
+- **Any CI checks failing** OR
+- **Workflows awaiting maintainer approval** (fork PRs) OR
- CRITICAL findings remain CONFIRMED_VALID (not dismissed as false positive)
- New CRITICAL issues introduced
- Fundamental problems with the fix approach
@@ -171,16 +223,36 @@ Provide your synthesis as a structured response matching the ParallelFollowupRes
}
```
+## CRITICAL: NEVER ASSUME - ALWAYS VERIFY
+
+**This applies to ALL agents you invoke:**
+
+1. **NEVER assume a finding is valid** - The finding-validator MUST read the actual code
+2. **NEVER assume a fix is correct** - The resolution-verifier MUST verify the change
+3. **NEVER assume line numbers are accurate** - Files may be shorter than cited lines
+4. **NEVER assume validation is missing** - Check callers and surrounding code
+5. **NEVER trust the original finding's description** - It may have been hallucinated
+
+**Before ANY finding blocks merge:**
+- The actual code at that location MUST be read
+- The problematic pattern MUST exist as described
+- There MUST NOT be mitigation/validation elsewhere
+- The evidence MUST be copy-pasted from the actual file
+
+**Why this matters:** AI reviewers sometimes hallucinate findings. Without verification,
+false positives persist forever and developers lose trust in the review system.
+
## Important Notes
1. **Be efficient**: Follow-up reviews should be faster than initial reviews
2. **Focus on changes**: Only review what changed since last review
-3. **Trust but verify**: Don't assume fixes are correct just because files changed
+3. **VERIFY, don't assume**: Don't assume fixes are correct OR that findings are valid
4. **Acknowledge progress**: Recognize genuine effort to address feedback
5. **Be specific**: Clearly state what blocks merge if verdict is not READY_TO_MERGE
## Context You Will Receive
+- **CI Status (CRITICAL)** - Passing/failing/pending checks and specific failed check names
- Previous review summary and findings
- New commits since last review (SHAs, messages)
- Diff of changes since last review
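As a rough sketch of the precedence the CI rules above impose (the field and function names here are hypothetical, not the real context schema):

```python
def ci_gate(failing: int, pending: int, awaiting_approval: bool) -> str | None:
    """Return a verdict forced by CI status, or None to let code findings decide."""
    if failing > 0 or awaiting_approval:
        return "BLOCKED"          # any failing check, or fork workflows awaiting approval
    if pending > 0:
        return "NEEDS_REVISION"   # cannot be READY_TO_MERGE while CI is still running
    return None                   # all checks passing: verdict comes from the findings


assert ci_gate(2, 0, False) == "BLOCKED"
assert ci_gate(0, 3, False) == "NEEDS_REVISION"
assert ci_gate(0, 0, True) == "BLOCKED"
assert ci_gate(0, 0, False) is None
```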
diff --git a/apps/backend/prompts/github/pr_followup_resolution_agent.md b/apps/backend/prompts/github/pr_followup_resolution_agent.md
index c0e4c38f15..9e35b827db 100644
--- a/apps/backend/prompts/github/pr_followup_resolution_agent.md
+++ b/apps/backend/prompts/github/pr_followup_resolution_agent.md
@@ -10,6 +10,23 @@ For each previous finding, determine whether it has been:
- **unresolved**: The issue remains or wasn't addressed
- **cant_verify**: Not enough information to determine status
+## CRITICAL: Verify Finding is In-Scope
+
+**Before verifying any finding, check if it's within THIS PR's scope:**
+
+1. **Is the file in the PR's changed files list?** - If not AND the finding isn't about impact, mark as `cant_verify`
+2. **Does the line number exist?** - If the finding cites line 710 but the file only has 600 lines, it was hallucinated
+3. **Was this from a merged branch?** - Commits with PR references like `(#584)` are from other PRs
+
+**Mark as `cant_verify` if:**
+- Finding references a file not in PR AND is not about impact of PR changes on that file
+- Line number doesn't exist (hallucinated finding)
+- Finding is about code from another PR's commits
+
+**Findings can reference files outside the PR if they're about:**
+- Impact of PR changes (e.g., "change to X breaks caller in Y")
+- Missing related updates (e.g., "you updated A but forgot B")
+
## Verification Process
For each previous finding:
@@ -31,12 +48,26 @@ If the file was modified:
- Is the fix approach sound?
- Are there edge cases the fix misses?
-### 4. Assign Confidence
-Rate your confidence (0.0-1.0):
-- **>0.9**: Clear evidence of resolution/non-resolution
-- **0.7-0.9**: Strong indicators but some uncertainty
-- **0.5-0.7**: Mixed signals, moderate confidence
-- **<0.5**: Unclear, consider marking as cant_verify
+### 4. Provide Evidence
+For each verification, provide actual code evidence:
+- **Copy-paste the relevant code** you examined
+- **Show what changed** - before vs after
+- **Explain WHY** this proves resolution/non-resolution
+
+## NEVER ASSUME - ALWAYS VERIFY
+
+**Before marking ANY finding as resolved or unresolved:**
+
+1. **NEVER assume a fix is correct** based on commit messages alone - READ the actual code
+2. **NEVER assume the original finding was accurate** - The line might not even exist
+3. **NEVER assume a renamed variable fixes a bug** - Check the actual logic changed
+4. **NEVER assume "file was modified" means "issue was fixed"** - Verify the specific fix
+
+**You MUST:**
+- Read the actual code at the cited location
+- Verify the problematic pattern no longer exists (for resolved)
+- Verify the pattern still exists (for unresolved)
+- Check surrounding context for alternative fixes you might miss
## Resolution Criteria
@@ -84,23 +115,20 @@ Return verifications in this structure:
{
"finding_id": "SEC-001",
"status": "resolved",
- "confidence": 0.92,
- "evidence": "The SQL query at line 45 now uses parameterized queries instead of string concatenation. The fix properly escapes all user inputs.",
- "resolution_notes": "Changed from f-string to cursor.execute() with parameters"
+ "evidence": "cursor.execute('SELECT * FROM users WHERE id = ?', (user_id,))",
+ "resolution_notes": "Changed from f-string to cursor.execute() with parameters. The code at line 45 now uses parameterized queries."
},
{
"finding_id": "QUAL-002",
"status": "partially_resolved",
- "confidence": 0.75,
- "evidence": "Error handling was added for the main path, but the fallback path at line 78 still lacks try-catch.",
+ "evidence": "try:\n result = process(data)\nexcept Exception as e:\n log.error(e)\n# But fallback path at line 78 still has: result = fallback(data) # no try-catch",
"resolution_notes": "Main function fixed, helper function still needs work"
},
{
"finding_id": "LOGIC-003",
"status": "unresolved",
- "confidence": 0.88,
- "evidence": "The off-by-one error remains. The loop still uses `<= length` instead of `< length`.",
- "resolution_notes": null
+ "evidence": "for i in range(len(items) + 1): # Still uses <= length",
+ "resolution_notes": "The off-by-one error remains at line 52."
}
]
```
diff --git a/apps/backend/prompts/github/pr_logic_agent.md b/apps/backend/prompts/github/pr_logic_agent.md
index 5b81b2bd6a..328ba13d06 100644
--- a/apps/backend/prompts/github/pr_logic_agent.md
+++ b/apps/backend/prompts/github/pr_logic_agent.md
@@ -6,6 +6,23 @@ You are a focused logic and correctness review agent. You have been spawned by t
Verify that the code logic is correct, handles all edge cases, and doesn't introduce subtle bugs. Focus ONLY on logic and correctness issues - not style, security, or general quality.
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Logic issues in changed code** - Bugs in files/lines modified by this PR
+2. **Logic impact of changes** - "This change breaks the assumption in `caller.ts:50`"
+3. **Incomplete state changes** - "You updated state X but forgot to reset Y"
+4. **Edge cases in new code** - "New function doesn't handle empty array case"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing bugs** - Old logic issues in untouched code
+2. **Unrelated improvements** - Don't suggest fixing bugs in code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your change to `sort()` breaks callers expecting stable order" - GOOD (impact analysis)
+- ✅ "Off-by-one error in your new loop" - GOOD (new code)
+- ❌ "The old `parser.ts` has a race condition" - BAD (pre-existing, not this PR)
+
## Logic Focus Areas
### 1. Algorithm Correctness
@@ -61,6 +78,21 @@ Verify that the code logic is correct, handles all edge cases, and doesn't intro
- Logic bugs must be demonstrable with a concrete example
- If the edge case is theoretical without practical impact, don't report it
+### Verify Before Claiming "Missing" Edge Case Handling
+
+When your finding claims an edge case is **not handled** (no check for empty, null, zero, etc.):
+
+**Ask yourself**: "Have I verified this case isn't handled, or did I just not see it?"
+
+- Read the **complete function** — guards often appear later or at the start
+- Check callers — the edge case might be prevented by caller validation
+- Look for early returns, assertions, or type guards you might have missed
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "Empty array case is not handled"
+✅ **Strong**: "I read the complete function (lines 12-45). There's no check for empty arrays, and the code directly accesses `arr[0]` on line 15 without any guard."
+
### Severity Classification (All block merge except LOW)
- **CRITICAL** (Blocker): Bug that will cause wrong results or crashes in production
- Example: Off-by-one causing data corruption, race condition causing lost updates
diff --git a/apps/backend/prompts/github/pr_parallel_orchestrator.md b/apps/backend/prompts/github/pr_parallel_orchestrator.md
index fbe34fb930..b26ffa97cf 100644
--- a/apps/backend/prompts/github/pr_parallel_orchestrator.md
+++ b/apps/backend/prompts/github/pr_parallel_orchestrator.md
@@ -6,6 +6,34 @@ You are an expert PR reviewer orchestrating a comprehensive, parallel code revie
**YOU decide which agents to invoke based on YOUR analysis of the PR.** There are no programmatic rules - you evaluate the PR's content, complexity, and risk areas, then delegate to the appropriate specialists.
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Issues in changed code** - Problems in files/lines actually modified by this PR
+2. **Impact on unchanged code** - "You changed X but forgot to update Y that depends on it"
+3. **Missing related changes** - "This pattern also exists in Z, did you mean to update it too?"
+4. **Breaking changes** - "This change breaks callers in other files"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing issues** - Old bugs/issues in code this PR didn't touch
+2. **Unrelated improvements** - Don't suggest refactoring untouched code
+
+**Key distinction:**
+- ✅ "Your change to `validateUser()` breaks the caller in `auth.ts:45`" - GOOD (impact of PR)
+- ✅ "You updated this validation but similar logic in `utils.ts` wasn't updated" - GOOD (incomplete)
+- ❌ "The existing code in `legacy.ts` has a SQL injection" - BAD (pre-existing, not this PR)
+
+## Merge Conflicts
+
+**Check for merge conflicts in the PR context.** If `has_merge_conflicts` is `true`:
+
+1. **Report this prominently** - Merge conflicts block the PR from being merged
+2. **Add a CRITICAL finding** with category "merge_conflict" and severity "critical"
+3. **Include in verdict reasoning** - The PR cannot be merged until conflicts are resolved
+
+Note: GitHub's API tells us IF there are conflicts but not WHICH files. The finding should state:
+> "This PR has merge conflicts with the base branch that must be resolved before merging."
+
## Available Specialist Agents
You have access to these specialized review agents via the Task tool:
diff --git a/apps/backend/prompts/github/pr_quality_agent.md b/apps/backend/prompts/github/pr_quality_agent.md
index f3007f1f81..7a3445fce6 100644
--- a/apps/backend/prompts/github/pr_quality_agent.md
+++ b/apps/backend/prompts/github/pr_quality_agent.md
@@ -6,6 +6,23 @@ You are a focused code quality review agent. You have been spawned by the orches
Perform a thorough code quality review of the provided code changes. Focus on maintainability, correctness, and adherence to best practices.
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Quality issues in changed code** - Problems in files/lines modified by this PR
+2. **Quality impact of changes** - "This change increases complexity of `handler.ts`"
+3. **Incomplete refactoring** - "You cleaned up X but similar pattern in Y wasn't updated"
+4. **New code not following patterns** - "New function doesn't match project's error handling pattern"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing quality issues** - Old code smells in untouched code
+2. **Unrelated improvements** - Don't suggest refactoring code the PR didn't touch
+
+**Key distinction:**
+- ✅ "Your new function has high cyclomatic complexity" - GOOD (new code)
+- ✅ "This duplicates existing helper in `utils.ts`, consider reusing it" - GOOD (guidance)
+- ❌ "The old `legacy.ts` file has 1000 lines" - BAD (pre-existing, not this PR)
+
## Quality Focus Areas
### 1. Code Complexity
@@ -62,6 +79,21 @@ Perform a thorough code quality review of the provided code changes. Focus on ma
- If it's subjective or debatable, don't report it
- Focus on objective quality issues
+### Verify Before Claiming "Missing" Handling
+
+When your finding claims something is **missing** (no error handling, no fallback, no cleanup):
+
+**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?"
+
+- Read the **complete function**, not just the flagged line — error handling often appears later
+- Check for try/catch blocks, guards, or fallbacks you might have missed
+- Look for framework-level handling (global error handlers, middleware)
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "This async call has no error handling"
+✅ **Strong**: "I read the complete `processOrder()` function (lines 34-89). The `fetch()` call on line 45 has no try/catch, and there's no `.catch()` anywhere in the function."
+
### Severity Classification (All block merge except LOW)
- **CRITICAL** (Blocker): Bug that will cause failures in production
- Example: Unhandled promise rejection, memory leak
diff --git a/apps/backend/prompts/github/pr_reviewer.md b/apps/backend/prompts/github/pr_reviewer.md
index 72a8b5dada..93d16ec4cb 100644
--- a/apps/backend/prompts/github/pr_reviewer.md
+++ b/apps/backend/prompts/github/pr_reviewer.md
@@ -4,24 +4,49 @@
You are a senior software engineer and security specialist performing a comprehensive code review. You have deep expertise in security vulnerabilities, code quality, software architecture, and industry best practices. Your reviews are thorough yet focused on issues that genuinely impact code security, correctness, and maintainability.
-## Review Methodology: Chain-of-Thought Analysis
+## Review Methodology: Evidence-Based Analysis
For each potential issue you consider:
1. **First, understand what the code is trying to do** - What is the developer's intent? What problem are they solving?
2. **Analyze if there are any problems with this approach** - Are there security risks, bugs, or design issues?
3. **Assess the severity and real-world impact** - Can this be exploited? Will this cause production issues? How likely is it to occur?
-4. **Apply the 80% confidence threshold** - Only report if you have >80% confidence this is a genuine issue with real impact
+4. **REQUIRE EVIDENCE** - Only report if you can show the actual problematic code snippet
5. **Provide a specific, actionable fix** - Give the developer exactly what they need to resolve the issue
-## Confidence Requirements
+## Evidence Requirements
-**CRITICAL: Quality over quantity**
+**CRITICAL: No evidence = No finding**
-- Only report findings where you have **>80% confidence** this is a real issue
-- If uncertain or it "could be a problem in theory," **DO NOT include it**
-- **5 high-quality findings are far better than 15 low-quality ones**
-- Each finding should pass the test: "Would I stake my reputation on this being a genuine issue?"
+- **Every finding MUST include actual code evidence** (the `evidence` field with a copy-pasted code snippet)
+- If you can't show the problematic code, **DO NOT report the finding**
+- The evidence must be verifiable - it should exist at the file and line you specify
+- **5 evidence-backed findings are far better than 15 speculative ones**
+- Each finding should pass the test: "Can I prove this with actual code from the file?"
+
+## NEVER ASSUME - ALWAYS VERIFY
+
+**This is the most important rule for avoiding false positives:**
+
+1. **NEVER assume code is vulnerable** - Read the actual implementation first
+2. **NEVER assume validation is missing** - Check callers and surrounding code for sanitization
+3. **NEVER assume a pattern is dangerous** - Verify there's no framework protection or mitigation
+4. **NEVER report based on function names alone** - A function called `unsafeQuery` might actually be safe
+5. **NEVER extrapolate from one line** - Read ±20 lines of context minimum
+
+**Before reporting ANY finding, you MUST:**
+- Actually read the code at the file/line you're about to cite
+- Verify the problematic pattern exists exactly as you describe
+- Check if there's validation/sanitization before or after
+- Confirm the code path is actually reachable
+- Verify the line number exists (file might be shorter than you think)
+
+**Common false positive causes to avoid:**
+- Reporting line 500 when the file only has 400 lines (hallucination)
+- Claiming "no validation" when validation exists in the caller
+- Flagging parameterized queries as SQL injection (framework protection)
+- Reporting XSS when output is auto-escaped by the framework
+- Citing code that was already fixed in an earlier commit
## Anti-Patterns to Avoid
@@ -214,14 +239,13 @@ Return a JSON array with this structure:
"id": "finding-1",
"severity": "critical",
"category": "security",
- "confidence": 0.95,
"title": "SQL Injection vulnerability in user search",
"description": "The search query parameter is directly interpolated into the SQL string without parameterization. This allows attackers to execute arbitrary SQL commands by injecting malicious input like `' OR '1'='1`.",
"impact": "An attacker can read, modify, or delete any data in the database, including sensitive user information, payment details, or admin credentials. This could lead to complete data breach.",
"file": "src/api/users.ts",
"line": 42,
"end_line": 45,
- "code_snippet": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`",
+ "evidence": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`",
"suggested_fix": "Use parameterized queries to prevent SQL injection:\n\nconst query = 'SELECT * FROM users WHERE name LIKE ?';\nconst results = await db.query(query, [`%${searchTerm}%`]);",
"fixable": true,
"references": ["https://owasp.org/www-community/attacks/SQL_Injection"]
@@ -230,13 +254,12 @@ Return a JSON array with this structure:
"id": "finding-2",
"severity": "high",
"category": "security",
- "confidence": 0.88,
"title": "Missing authorization check allows privilege escalation",
"description": "The deleteUser endpoint only checks if the user is authenticated, but doesn't verify if they have admin privileges. Any logged-in user can delete other user accounts.",
"impact": "Regular users can delete admin accounts or any other user, leading to service disruption, data loss, and potential account takeover attacks.",
"file": "src/api/admin.ts",
"line": 78,
- "code_snippet": "router.delete('/users/:id', authenticate, async (req, res) => {\n await User.delete(req.params.id);\n});",
+ "evidence": "router.delete('/users/:id', authenticate, async (req, res) => {\n await User.delete(req.params.id);\n});",
"suggested_fix": "Add authorization check:\n\nrouter.delete('/users/:id', authenticate, requireAdmin, async (req, res) => {\n await User.delete(req.params.id);\n});\n\n// Or inline:\nif (!req.user.isAdmin) {\n return res.status(403).json({ error: 'Admin access required' });\n}",
"fixable": true,
"references": ["https://owasp.org/Top10/A01_2021-Broken_Access_Control/"]
@@ -245,13 +268,13 @@ Return a JSON array with this structure:
"id": "finding-3",
"severity": "medium",
"category": "quality",
- "confidence": 0.82,
"title": "Function exceeds complexity threshold",
"description": "The processPayment function has 15 conditional branches, making it difficult to test all paths and maintain. High cyclomatic complexity increases bug risk.",
"impact": "High complexity functions are more likely to contain bugs, harder to test comprehensively, and difficult for other developers to understand and modify safely.",
"file": "src/payments/processor.ts",
"line": 125,
"end_line": 198,
+    "evidence": "async function processPayment(payment: Payment): Promise<void> {\n  if (payment.type === 'credit') { ... } else if (payment.type === 'debit') { ... }\n  // 15+ branches follow\n}",
"suggested_fix": "Extract sub-functions to reduce complexity:\n\n1. validatePaymentData(payment) - handle all validation\n2. calculateFees(amount, type) - fee calculation logic\n3. processRefund(payment) - refund-specific logic\n4. sendPaymentNotification(payment, status) - notification logic\n\nThis will reduce the main function to orchestration only.",
"fixable": false,
"references": []
@@ -270,19 +293,18 @@ Return a JSON array with this structure:
- **medium** (Recommended): Improve code quality (maintainability concerns) - **Blocks merge: YES** (AI fixes quickly)
- **low** (Suggestion): Suggestions for improvement (minor enhancements) - **Blocks merge: NO**
- **category**: `security` | `quality` | `logic` | `test` | `docs` | `pattern` | `performance`
-- **confidence**: Float 0.0-1.0 representing your confidence this is a genuine issue (must be ≥0.80)
- **title**: Short, specific summary (max 80 chars)
- **description**: Detailed explanation of the issue
- **impact**: Real-world consequences if not fixed (business/security/user impact)
- **file**: Relative file path
- **line**: Starting line number
+- **evidence**: **REQUIRED** - Actual code snippet from the file proving the issue exists. Must be copy-pasted from the actual code.
- **suggested_fix**: Specific code changes or guidance to resolve the issue
- **fixable**: Boolean - can this be auto-fixed by a code tool?
### Optional Fields
- **end_line**: Ending line number for multi-line issues
-- **code_snippet**: The problematic code excerpt
- **references**: Array of relevant URLs (OWASP, CVE, documentation)
## Guidelines for High-Quality Reviews
@@ -292,7 +314,7 @@ Return a JSON array with this structure:
3. **Explain impact**: Don't just say what's wrong, explain the real-world consequences
4. **Prioritize ruthlessly**: Focus on issues that genuinely matter
5. **Consider context**: Understand the purpose of changed code before flagging issues
-6. **Validate confidence**: If you're not >80% sure, don't report it
+6. **Require evidence**: Always include the actual code snippet in the `evidence` field - no code, no finding
7. **Provide references**: Link to OWASP, CVE databases, or official documentation when relevant
8. **Think like an attacker**: For security issues, explain how it could be exploited
9. **Be constructive**: Frame issues as opportunities to improve, not criticisms
@@ -314,13 +336,12 @@ Return a JSON array with this structure:
"id": "finding-auth-1",
"severity": "critical",
"category": "security",
- "confidence": 0.92,
"title": "JWT secret hardcoded in source code",
"description": "The JWT signing secret 'super-secret-key-123' is hardcoded in the authentication middleware. Anyone with access to the source code can forge authentication tokens for any user.",
"impact": "An attacker can create valid JWT tokens for any user including admins, leading to complete account takeover and unauthorized access to all user data and admin functions.",
"file": "src/middleware/auth.ts",
"line": 12,
- "code_snippet": "const SECRET = 'super-secret-key-123';\njwt.sign(payload, SECRET);",
+ "evidence": "const SECRET = 'super-secret-key-123';\njwt.sign(payload, SECRET);",
"suggested_fix": "Move the secret to environment variables:\n\n// In .env file:\nJWT_SECRET=\n\n// In auth.ts:\nconst SECRET = process.env.JWT_SECRET;\nif (!SECRET) {\n throw new Error('JWT_SECRET not configured');\n}\njwt.sign(payload, SECRET);",
"fixable": true,
"references": [
@@ -332,4 +353,4 @@ Return a JSON array with this structure:
---
-Remember: Your goal is to find **genuine, high-impact issues** that will make the codebase more secure, correct, and maintainable. Quality over quantity. Be thorough but focused.
+Remember: Your goal is to find **genuine, high-impact issues** that will make the codebase more secure, correct, and maintainable. **Every finding must include code evidence** - if you can't show the actual code, don't report the finding. Quality over quantity. Be thorough but focused.
diff --git a/apps/backend/prompts/github/pr_security_agent.md b/apps/backend/prompts/github/pr_security_agent.md
index e2c3ae3686..15061038b4 100644
--- a/apps/backend/prompts/github/pr_security_agent.md
+++ b/apps/backend/prompts/github/pr_security_agent.md
@@ -6,6 +6,23 @@ You are a focused security review agent. You have been spawned by the orchestrat
Perform a thorough security review of the provided code changes, focusing ONLY on security vulnerabilities. Do not review code quality, style, or other non-security concerns.
+## CRITICAL: PR Scope and Context
+
+### What IS in scope (report these issues):
+1. **Security issues in changed code** - Vulnerabilities introduced or modified by this PR
+2. **Security impact of changes** - "This change exposes sensitive data to the new endpoint"
+3. **Missing security for new features** - "New API endpoint lacks authentication"
+4. **Broken security assumptions** - "Change to auth.ts invalidates security check in handler.ts"
+
+### What is NOT in scope (do NOT report):
+1. **Pre-existing vulnerabilities** - Old security issues in code this PR didn't touch
+2. **Unrelated security improvements** - Don't suggest hardening untouched code
+
+**Key distinction:**
+- ✅ "Your new endpoint lacks rate limiting" - GOOD (new code)
+- ✅ "This change bypasses the auth check in `middleware.ts`" - GOOD (impact analysis)
+- ❌ "The old `legacy_auth.ts` uses MD5 for passwords" - BAD (pre-existing, not this PR)
+
## Security Focus Areas
### 1. Injection Vulnerabilities
@@ -57,6 +74,21 @@ Perform a thorough security review of the provided code changes, focusing ONLY o
- If you're unsure, don't report it
- Prefer false negatives over false positives
+### Verify Before Claiming "Missing" Protections
+
+When your finding claims protection is **missing** (no validation, no sanitization, no auth check):
+
+**Ask yourself**: "Have I verified this is actually missing, or did I just not see it?"
+
+- Check if validation/sanitization exists elsewhere (middleware, caller, framework)
+- Read the **complete function**, not just the flagged line
+- Look for comments explaining why something appears unprotected
+
+**Your evidence must prove absence — not just that you didn't see it.**
+
+❌ **Weak**: "User input is used without validation"
+✅ **Strong**: "I checked the complete request flow. Input reaches this SQL query without passing through any validation or sanitization layer."
+
### Severity Classification (All block merge except LOW)
- **CRITICAL** (Blocker): Exploitable vulnerability leading to data breach, RCE, or system compromise
- Example: SQL injection, hardcoded admin password
diff --git a/apps/backend/prompts/qa_fixer.md b/apps/backend/prompts/qa_fixer.md
index 8507756946..fe5c018025 100644
--- a/apps/backend/prompts/qa_fixer.md
+++ b/apps/backend/prompts/qa_fixer.md
@@ -80,6 +80,68 @@ lsof -iTCP -sTCP:LISTEN | grep -E "node|python|next|vite"
---
+## 🚨 CRITICAL: PATH CONFUSION PREVENTION 🚨
+
+**THE #1 BUG IN MONOREPOS: Doubled paths after `cd` commands**
+
+### The Problem
+
+After running `cd ./apps/frontend`, your current directory changes. If you then use paths like `apps/frontend/src/file.ts`, you're creating **doubled paths** like `apps/frontend/apps/frontend/src/file.ts`.
+
+### The Solution: ALWAYS CHECK YOUR CWD
+
+**BEFORE every git command or file operation:**
+
+```bash
+# Step 1: Check where you are
+pwd
+
+# Step 2: Use paths RELATIVE TO CURRENT DIRECTORY
+# If pwd shows: /path/to/project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts
+```
+
+### Examples
+
+**❌ WRONG - Path gets doubled:**
+```bash
+cd ./apps/frontend
+git add apps/frontend/src/file.ts # Looks for apps/frontend/apps/frontend/src/file.ts
+```
+
+**✅ CORRECT - Use relative path from current directory:**
+```bash
+cd ./apps/frontend
+pwd # Shows: /path/to/project/apps/frontend
+git add src/file.ts                  # Stages apps/frontend/src/file.ts (its path as seen from the repo root)
+```
+
+**✅ ALSO CORRECT - Stay at root, use full relative path:**
+```bash
+# Don't change directory at all
+git add ./apps/frontend/src/file.ts # Works from project root
+```
+
+### Mandatory Pre-Command Check
+
+**Before EVERY git add, git commit, or file operation in a monorepo:**
+
+```bash
+# 1. Where am I?
+pwd
+
+# 2. What files am I targeting?
+ls -la [target-path] # Verify the path exists
+
+# 3. Only then run the command
+git add [verified-path]
+```
+
+**This check takes 2 seconds and prevents hours of debugging.**
+
+---
+
## PHASE 3: FIX ISSUES ONE BY ONE
For each issue in the fix request:
@@ -166,8 +228,45 @@ If any issue is not fixed, go back to Phase 3.
## PHASE 6: COMMIT FIXES
+### Path Verification (MANDATORY FIRST STEP)
+
+**🚨 BEFORE running ANY git commands, verify your current directory:**
+
```bash
-git add .
+# Step 1: Where am I?
+pwd
+
+# Step 2: What files do I want to commit?
+# If you changed to a subdirectory (e.g., cd apps/frontend),
+# you need to use paths RELATIVE TO THAT DIRECTORY, not from project root
+
+# Step 3: Verify paths exist
+ls -la [path-to-files] # Make sure the path is correct from your current location
+
+# Example in a monorepo:
+# If pwd shows: /project/apps/frontend
+# Then use: git add src/file.ts
+# NOT: git add apps/frontend/src/file.ts (this would look for apps/frontend/apps/frontend/src/file.ts)
+```
+
+**CRITICAL RULE:** If you're in a subdirectory, either:
+- **Option A:** Return to the working directory root: `cd [working directory root]`
+- **Option B:** Use paths relative to your CURRENT directory (check with `pwd`)
+
+### Create the Commit
+
+```bash
+# FIRST: Make sure you're in the working directory root
+pwd # Should match your working directory
+
+# Add all files EXCEPT .auto-claude directory (spec files should never be committed)
+git add . ':!.auto-claude'
+
+# If git add fails with "pathspec did not match", you have a path problem:
+# 1. Run pwd to see where you are
+# 2. Run git status to see what git sees
+# 3. Adjust your paths accordingly
+
git commit -m "fix: Address QA issues (qa-requested)
Fixes:
@@ -182,6 +281,8 @@ Verified:
QA Fix Session: [N]"
```
+**CRITICAL**: The `:!.auto-claude` pathspec exclusion ensures spec files are NEVER committed.
+
**NOTE**: Do NOT push to remote. All work stays local until user reviews and approves.
---
@@ -304,6 +405,13 @@ npx prisma migrate dev --name [name]
- How you verified
- Commit messages
+### Git Configuration - NEVER MODIFY
+**CRITICAL**: You MUST NOT modify git user configuration. Never run:
+- `git config user.name`
+- `git config user.email`
+
+The repository inherits the user's configured git identity. Do NOT set test users.
+
---
## QA LOOP BEHAVIOR
diff --git a/apps/backend/prompts/qa_reviewer.md b/apps/backend/prompts/qa_reviewer.md
index d986a41b6e..ff52320a6b 100644
--- a/apps/backend/prompts/qa_reviewer.md
+++ b/apps/backend/prompts/qa_reviewer.md
@@ -35,8 +35,8 @@ cat project_index.json
# 4. Check build progress
cat build-progress.txt
-# 5. See what files were changed
-git diff main --name-only
+# 5. See what files were changed (three-dot diff shows only spec branch changes)
+git diff {{BASE_BRANCH}}...HEAD --name-status
# 6. Read QA acceptance criteria from spec
grep -A 100 "## QA Acceptance Criteria" spec.md
@@ -514,7 +514,7 @@ All acceptance criteria verified:
The implementation is production-ready.
Sign-off recorded in implementation_plan.json.
-Ready for merge to main.
+Ready for merge to {{BASE_BRANCH}}.
```
### If Rejected:
diff --git a/apps/backend/prompts_pkg/prompt_generator.py b/apps/backend/prompts_pkg/prompt_generator.py
index 15d2bc9b09..ebd9148854 100644
--- a/apps/backend/prompts_pkg/prompt_generator.py
+++ b/apps/backend/prompts_pkg/prompt_generator.py
@@ -62,6 +62,11 @@ def generate_environment_context(project_dir: Path, spec_dir: Path) -> str:
Your filesystem is restricted to your working directory. All file paths should be
relative to this location. Do NOT use absolute paths.
+**⚠️ CRITICAL:** Before ANY git command or file operation, run `pwd` to verify your current
+directory. If you've used `cd` to change directories, you MUST use paths relative to your
+NEW location, not the working directory. See the PATH CONFUSION PREVENTION section in the
+coder prompt for detailed examples.
+
**Important Files:**
- Spec: `{relative_spec}/spec.md`
- Plan: `{relative_spec}/implementation_plan.json`
diff --git a/apps/backend/prompts_pkg/prompts.py b/apps/backend/prompts_pkg/prompts.py
index acb29d7332..83a8726926 100644
--- a/apps/backend/prompts_pkg/prompts.py
+++ b/apps/backend/prompts_pkg/prompts.py
@@ -7,7 +7,9 @@
"""
import json
+import os
import re
+import subprocess
from pathlib import Path
from .project_context import (
@@ -16,6 +18,133 @@
load_project_index,
)
+
+def _validate_branch_name(branch: str | None) -> str | None:
+ """
+ Validate a git branch name for safety and correctness.
+
+ Args:
+ branch: The branch name to validate
+
+ Returns:
+ The validated branch name, or None if invalid
+ """
+ if not branch or not isinstance(branch, str):
+ return None
+
+ # Trim whitespace
+ branch = branch.strip()
+
+ # Reject empty or whitespace-only strings
+ if not branch:
+ return None
+
+ # Enforce maximum length (git refs can be long, but 255 is reasonable)
+ if len(branch) > 255:
+ return None
+
+ # Require at least one alphanumeric character
+ if not any(c.isalnum() for c in branch):
+ return None
+
+ # Only allow common git-ref characters: letters, numbers, ., _, -, /
+ # This prevents prompt injection and other security issues
+ if not re.match(r"^[A-Za-z0-9._/-]+$", branch):
+ return None
+
+    # Newlines, control characters, and other characters that could enable
+    # prompt injection are already rejected by the character allowlist above
+
+ return branch
+
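A quick sanity check of the validator's behavior may help; this sketch assumes `prompts_pkg` is importable from the backend root (the import path is an assumption, not part of the change):

```python
# Illustrative usage of the branch-name validator (hypothetical import path).
from prompts_pkg.prompts import _validate_branch_name

assert _validate_branch_name("feature/login-fix") == "feature/login-fix"
assert _validate_branch_name("  release-1.2  ") == "release-1.2"  # whitespace is trimmed
assert _validate_branch_name("bad branch") is None   # space is not in the allowlist
assert _validate_branch_name("x" * 300) is None      # exceeds the 255-character limit
assert _validate_branch_name("///...") is None       # no alphanumeric character
assert _validate_branch_name(None) is None
```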
+
+def _get_base_branch_from_metadata(spec_dir: Path) -> str | None:
+ """
+ Read baseBranch from task_metadata.json if it exists.
+
+ Args:
+ spec_dir: Directory containing the spec files
+
+ Returns:
+ The baseBranch from metadata, or None if not found or invalid
+ """
+ metadata_path = spec_dir / "task_metadata.json"
+ if metadata_path.exists():
+ try:
+ with open(metadata_path, encoding="utf-8") as f:
+ metadata = json.load(f)
+ base_branch = metadata.get("baseBranch")
+ # Validate the branch name before returning
+ return _validate_branch_name(base_branch)
+ except (json.JSONDecodeError, OSError):
+ pass
+ return None
+
+
+def _detect_base_branch(spec_dir: Path, project_dir: Path) -> str:
+ """
+ Detect the base branch for a project/task.
+
+ Priority order:
+ 1. baseBranch from task_metadata.json (task-level override)
+ 2. DEFAULT_BRANCH environment variable
+ 3. Auto-detect main/master/develop (if they exist in git)
+ 4. Fall back to "main"
+
+ Args:
+ spec_dir: Directory containing the spec files
+ project_dir: Project root directory
+
+ Returns:
+ The detected base branch name
+ """
+ # 1. Check task_metadata.json for task-specific baseBranch
+ metadata_branch = _get_base_branch_from_metadata(spec_dir)
+ if metadata_branch:
+ return metadata_branch
+
+ # 2. Check for DEFAULT_BRANCH env var
+ env_branch = _validate_branch_name(os.getenv("DEFAULT_BRANCH"))
+ if env_branch:
+ # Verify the branch exists (with timeout to prevent hanging)
+ try:
+ result = subprocess.run(
+ ["git", "rev-parse", "--verify", env_branch],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ timeout=3,
+ )
+ if result.returncode == 0:
+ return env_branch
+ except subprocess.TimeoutExpired:
+ # Treat timeout as branch verification failure
+ pass
+
+ # 3. Auto-detect main/master/develop
+ for branch in ["main", "master", "develop"]:
+ try:
+ result = subprocess.run(
+ ["git", "rev-parse", "--verify", branch],
+ cwd=project_dir,
+ capture_output=True,
+ text=True,
+ encoding="utf-8",
+ errors="replace",
+ timeout=3,
+ )
+ if result.returncode == 0:
+ return branch
+ except subprocess.TimeoutExpired:
+ # Treat timeout as branch verification failure, try next branch
+ continue
+
+ # 4. Fall back to "main"
+ return "main"
+
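To make the priority order concrete, here is a minimal sketch showing a task-level override winning over `DEFAULT_BRANCH`; the temp directory and metadata values are illustrative, and the import path is assumed:

```python
# Hypothetical illustration: baseBranch in task_metadata.json (priority 1) wins
# over the DEFAULT_BRANCH environment variable (priority 2), with no git calls.
import json
import os
import tempfile
from pathlib import Path

from prompts_pkg.prompts import _detect_base_branch  # assumed import path

spec_dir = Path(tempfile.mkdtemp())
(spec_dir / "task_metadata.json").write_text(json.dumps({"baseBranch": "develop"}))
os.environ["DEFAULT_BRANCH"] = "main"

# Prints "develop": the validated metadata value short-circuits the env var
# check and the main/master/develop auto-detection.
print(_detect_base_branch(spec_dir, Path(".")))
```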
+
# Directory containing prompt files
# prompts/ is a sibling directory of prompts_pkg/, so go up one level first
PROMPTS_DIR = Path(__file__).parent.parent / "prompts"
@@ -304,6 +433,7 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str:
1. Loads the base QA reviewer prompt
2. Detects project capabilities from project_index.json
3. Injects only relevant MCP tool documentation (Electron, Puppeteer, DB, API)
+ 4. Detects and injects the correct base branch for git comparisons
This saves context window by excluding irrelevant tool docs.
For example, a CLI Python project won't get Electron validation docs.
@@ -315,9 +445,15 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str:
Returns:
The QA reviewer prompt with project-specific tools injected
"""
+ # Detect the base branch for this task (from task_metadata.json or auto-detect)
+ base_branch = _detect_base_branch(spec_dir, project_dir)
+
# Load base QA reviewer prompt
base_prompt = _load_prompt_file("qa_reviewer.md")
+ # Replace {{BASE_BRANCH}} placeholder with the actual base branch
+ base_prompt = base_prompt.replace("{{BASE_BRANCH}}", base_branch)
+
# Load project index and detect capabilities
project_index = load_project_index(project_dir)
capabilities = detect_project_capabilities(project_index)
@@ -347,6 +483,17 @@ def get_qa_reviewer_prompt(spec_dir: Path, project_dir: Path) -> str:
The project root is: `{project_dir}`
+## GIT BRANCH CONFIGURATION
+
+**Base branch for comparison:** `{base_branch}`
+
+When checking for unrelated changes, use three-dot diff syntax:
+```bash
+git diff {base_branch}...HEAD --name-status
+```
+
+This shows only changes made in the spec branch since it diverged from `{base_branch}`.
+
---
## PROJECT CAPABILITIES DETECTED
diff --git a/apps/backend/qa/loop.py b/apps/backend/qa/loop.py
index ff8308695e..fcbc1c7f34 100644
--- a/apps/backend/qa/loop.py
+++ b/apps/backend/qa/loop.py
@@ -6,6 +6,7 @@
approval or max iterations.
"""
+import os
import time as time_module
from pathlib import Path
@@ -22,6 +23,7 @@
from phase_config import get_phase_model, get_phase_thinking_budget
from phase_event import ExecutionPhase, emit_phase
from progress import count_subtasks, is_build_complete
+from security.constants import PROJECT_DIR_ENV_VAR
from task_logger import (
LogPhase,
get_task_logger,
@@ -83,6 +85,10 @@ async def run_qa_validation_loop(
Returns:
True if QA approved, False otherwise
"""
+ # Set environment variable for security hooks to find the correct project directory
+ # This is needed because os.getcwd() may return the wrong directory in worktree mode
+ os.environ[PROJECT_DIR_ENV_VAR] = str(project_dir.resolve())
+
debug_section("qa_loop", "QA Validation Loop")
debug(
"qa_loop",
diff --git a/apps/backend/query_memory.py b/apps/backend/query_memory.py
index c16f82d943..e729e892bd 100644
--- a/apps/backend/query_memory.py
+++ b/apps/backend/query_memory.py
@@ -185,24 +185,31 @@ def cmd_get_memories(args):
"""
result = conn.execute(query, parameters={"limit": limit})
- df = result.get_as_df()
+ # Process results without pandas (iterate through result set directly)
memories = []
- for _, row in df.iterrows():
+ while result.has_next():
+ row = result.get_next()
+ # Row order: uuid, name, created_at, content, description, group_id
+ uuid_val = serialize_value(row[0]) if len(row) > 0 else None
+ name_val = serialize_value(row[1]) if len(row) > 1 else ""
+ created_at_val = serialize_value(row[2]) if len(row) > 2 else None
+ content_val = serialize_value(row[3]) if len(row) > 3 else ""
+ description_val = serialize_value(row[4]) if len(row) > 4 else ""
+ group_id_val = serialize_value(row[5]) if len(row) > 5 else ""
+
memory = {
- "id": row.get("uuid") or row.get("name", "unknown"),
- "name": row.get("name", ""),
- "type": infer_episode_type(row.get("name", ""), row.get("content", "")),
- "timestamp": row.get("created_at") or datetime.now().isoformat(),
- "content": row.get("content")
- or row.get("description")
- or row.get("name", ""),
- "description": row.get("description", ""),
- "group_id": row.get("group_id", ""),
+ "id": uuid_val or name_val or "unknown",
+ "name": name_val or "",
+ "type": infer_episode_type(name_val or "", content_val or ""),
+ "timestamp": created_at_val or datetime.now().isoformat(),
+ "content": content_val or description_val or name_val or "",
+ "description": description_val or "",
+ "group_id": group_id_val or "",
}
# Extract session number if present
- session_num = extract_session_number(row.get("name", ""))
+ session_num = extract_session_number(name_val or "")
if session_num:
memory["session_number"] = session_num
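Because the same positional-unpacking pattern now repeats in `cmd_get_memories`, `cmd_search`, and `cmd_get_entities`, one possible follow-up (a sketch only, not part of this change) is a small helper that names the columns once:

```python
# Possible shared helper (illustrative): map a positional result row onto named
# columns so each command only declares its column order once.
def iter_rows_as_dicts(result, columns):
    """Yield one dict per row, keyed by `columns`, using serialize_value per cell."""
    while result.has_next():
        row = result.get_next()
        yield {
            col: serialize_value(row[i]) if i < len(row) else None
            for i, col in enumerate(columns)
        }

# Hypothetical use inside cmd_get_memories:
#   for r in iter_rows_as_dicts(result, ["uuid", "name", "created_at",
#                                        "content", "description", "group_id"]):
#       name_val = r["name"] or ""
```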
@@ -251,24 +258,31 @@ def cmd_search(args):
result = conn.execute(
query, parameters={"search_query": search_query, "limit": limit}
)
- df = result.get_as_df()
+ # Process results without pandas
memories = []
- for _, row in df.iterrows():
+ while result.has_next():
+ row = result.get_next()
+ # Row order: uuid, name, created_at, content, description, group_id
+ uuid_val = serialize_value(row[0]) if len(row) > 0 else None
+ name_val = serialize_value(row[1]) if len(row) > 1 else ""
+ created_at_val = serialize_value(row[2]) if len(row) > 2 else None
+ content_val = serialize_value(row[3]) if len(row) > 3 else ""
+ description_val = serialize_value(row[4]) if len(row) > 4 else ""
+ group_id_val = serialize_value(row[5]) if len(row) > 5 else ""
+
memory = {
- "id": row.get("uuid") or row.get("name", "unknown"),
- "name": row.get("name", ""),
- "type": infer_episode_type(row.get("name", ""), row.get("content", "")),
- "timestamp": row.get("created_at") or datetime.now().isoformat(),
- "content": row.get("content")
- or row.get("description")
- or row.get("name", ""),
- "description": row.get("description", ""),
- "group_id": row.get("group_id", ""),
+ "id": uuid_val or name_val or "unknown",
+ "name": name_val or "",
+ "type": infer_episode_type(name_val or "", content_val or ""),
+ "timestamp": created_at_val or datetime.now().isoformat(),
+ "content": content_val or description_val or name_val or "",
+ "description": description_val or "",
+ "group_id": group_id_val or "",
"score": 1.0, # Keyword match score
}
- session_num = extract_session_number(row.get("name", ""))
+ session_num = extract_session_number(name_val or "")
if session_num:
memory["session_number"] = session_num
@@ -461,19 +475,26 @@ def cmd_get_entities(args):
"""
result = conn.execute(query, parameters={"limit": limit})
- df = result.get_as_df()
+ # Process results without pandas
entities = []
- for _, row in df.iterrows():
- if not row.get("summary"):
+ while result.has_next():
+ row = result.get_next()
+ # Row order: uuid, name, summary, created_at
+ uuid_val = serialize_value(row[0]) if len(row) > 0 else None
+ name_val = serialize_value(row[1]) if len(row) > 1 else ""
+ summary_val = serialize_value(row[2]) if len(row) > 2 else ""
+ created_at_val = serialize_value(row[3]) if len(row) > 3 else None
+
+ if not summary_val:
continue
entity = {
- "id": row.get("uuid") or row.get("name", "unknown"),
- "name": row.get("name", ""),
- "type": infer_entity_type(row.get("name", "")),
- "timestamp": row.get("created_at") or datetime.now().isoformat(),
- "content": row.get("summary", ""),
+ "id": uuid_val or name_val or "unknown",
+ "name": name_val or "",
+ "type": infer_entity_type(name_val or ""),
+ "timestamp": created_at_val or datetime.now().isoformat(),
+ "content": summary_val or "",
}
entities.append(entity)
@@ -488,6 +509,118 @@ def cmd_get_entities(args):
output_error(f"Query failed: {e}")
+def cmd_add_episode(args):
+ """
+ Add a new episode to the memory database.
+
+ This is called from the Electron main process to save PR review insights,
+ patterns, gotchas, and other memories directly to the LadybugDB database.
+
+ Args:
+ args.db_path: Path to database directory
+ args.database: Database name
+ args.name: Episode name/title
+ args.content: Episode content (JSON string)
+ args.episode_type: Type of episode (session_insight, pattern, gotcha, task_outcome, pr_review)
+ args.group_id: Optional group ID for namespacing
+ """
+ if not apply_monkeypatch():
+ output_error("Neither kuzu nor LadybugDB is installed")
+ return
+
+ try:
+ import uuid as uuid_module
+
+ try:
+ import kuzu
+ except ImportError:
+ import real_ladybug as kuzu
+
+ # Parse content from JSON if provided
+ content = args.content
+ if content:
+ try:
+ # Try to parse as JSON to validate
+ parsed = json.loads(content)
+ # Re-serialize to ensure consistent formatting
+ content = json.dumps(parsed)
+ except json.JSONDecodeError:
+ # If not valid JSON, use as-is
+ pass
+
+ # Generate unique ID
+ episode_uuid = str(uuid_module.uuid4())
+ created_at = datetime.now().isoformat()
+
+ # Get database path - create directory if needed
+ full_path = Path(args.db_path) / args.database
+ if not full_path.exists():
+ # For new databases, create the parent directory
+ Path(args.db_path).mkdir(parents=True, exist_ok=True)
+
+ # Open database (creates it if it doesn't exist)
+ db = kuzu.Database(str(full_path))
+ conn = kuzu.Connection(db)
+
+ # Always try to create the Episodic table if it doesn't exist
+ # This handles both new databases and existing databases without the table
+ try:
+ conn.execute("""
+ CREATE NODE TABLE IF NOT EXISTS Episodic (
+ uuid STRING PRIMARY KEY,
+ name STRING,
+ content STRING,
+ source_description STRING,
+ group_id STRING,
+ created_at STRING
+ )
+ """)
+ except Exception as schema_err:
+ # Table might already exist with different schema - that's ok
+ # The insert will fail if schema is incompatible
+ sys.stderr.write(f"Schema creation note: {schema_err}\n")
+
+ # Insert the episode
+ try:
+ insert_query = """
+ CREATE (e:Episodic {
+ uuid: $uuid,
+ name: $name,
+ content: $content,
+ source_description: $description,
+ group_id: $group_id,
+ created_at: $created_at
+ })
+ """
+ conn.execute(
+ insert_query,
+ parameters={
+ "uuid": episode_uuid,
+ "name": args.name,
+ "content": content,
+ "description": f"[{args.episode_type}] {args.name}",
+ "group_id": args.group_id or "",
+ "created_at": created_at,
+ },
+ )
+
+ output_json(
+ True,
+ data={
+ "id": episode_uuid,
+ "name": args.name,
+ "type": args.episode_type,
+ "timestamp": created_at,
+ },
+ )
+
+ except Exception as e:
+ output_error(f"Failed to insert episode: {e}")
+
+ except Exception as e:
+ output_error(f"Failed to add episode: {e}")
+
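For reference, a hedged sketch of how a caller (such as the Electron main process) might invoke the new subcommand; the database path, names, and payload below are illustrative:

```python
# Illustrative invocation of the add-episode subcommand (all values are made up).
import json
import subprocess
import sys

result = subprocess.run(
    [
        sys.executable, "query_memory.py", "add-episode",
        "/path/to/memory-db",        # db_path (illustrative)
        "auto-claude",               # database name (illustrative)
        "--name", "PR review insights",
        "--content", json.dumps({"summary": "Two findings, both fixed"}),
        "--type", "pr_review",
        "--group-id", "example-project",
    ],
    capture_output=True,
    text=True,
)
# On success the command prints JSON containing id, name, type, and timestamp.
print(result.stdout)
```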
+
def infer_episode_type(name: str, content: str = "") -> str:
"""Infer the episode type from its name and content."""
name_lower = (name or "").lower()
@@ -580,6 +713,27 @@ def main():
"--limit", type=int, default=20, help="Maximum results"
)
+ # add-episode command (for saving memories from Electron app)
+ add_parser = subparsers.add_parser(
+ "add-episode",
+ help="Add an episode to the memory database (called from Electron)",
+ )
+ add_parser.add_argument("db_path", help="Path to database directory")
+ add_parser.add_argument("database", help="Database name")
+ add_parser.add_argument("--name", required=True, help="Episode name/title")
+ add_parser.add_argument(
+ "--content", required=True, help="Episode content (JSON string)"
+ )
+ add_parser.add_argument(
+ "--type",
+ dest="episode_type",
+ default="session_insight",
+ help="Episode type (session_insight, pattern, gotcha, task_outcome, pr_review)",
+ )
+ add_parser.add_argument(
+ "--group-id", dest="group_id", help="Optional group ID for namespacing"
+ )
+
args = parser.parse_args()
if not args.command:
@@ -594,6 +748,7 @@ def main():
"search": cmd_search,
"semantic-search": cmd_semantic_search,
"get-entities": cmd_get_entities,
+ "add-episode": cmd_add_episode,
}
handler = commands.get(args.command)
diff --git a/apps/backend/requirements.txt b/apps/backend/requirements.txt
index 59aec7b0ee..95c8a1eacb 100644
--- a/apps/backend/requirements.txt
+++ b/apps/backend/requirements.txt
@@ -10,6 +10,10 @@ tomli>=2.0.0; python_version < "3.11"
real_ladybug>=0.13.0; python_version >= "3.12"
graphiti-core>=0.5.0; python_version >= "3.12"
+# Windows-specific dependency for LadybugDB/Graphiti
+# pywin32 provides Windows system bindings required by real_ladybug
+pywin32>=306; sys_platform == "win32" and python_version >= "3.12"
+
# Google AI (optional - for Gemini LLM and embeddings)
google-generativeai>=0.8.0
diff --git a/apps/backend/runners/ai_analyzer/claude_client.py b/apps/backend/runners/ai_analyzer/claude_client.py
index e1f5a669dc..5d3f07121a 100644
--- a/apps/backend/runners/ai_analyzer/claude_client.py
+++ b/apps/backend/runners/ai_analyzer/claude_client.py
@@ -8,6 +8,7 @@
try:
from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient
+ from phase_config import resolve_model_id
CLAUDE_SDK_AVAILABLE = True
except ImportError:
@@ -17,7 +18,7 @@
class ClaudeAnalysisClient:
"""Wrapper for Claude SDK client with analysis-specific configuration."""
- DEFAULT_MODEL = "claude-sonnet-4-5-20250929"
+ DEFAULT_MODEL = "sonnet" # Shorthand - resolved via API Profile if configured
ALLOWED_TOOLS = ["Read", "Glob", "Grep"]
MAX_TURNS = 50
@@ -110,7 +111,7 @@ def _create_client(self, settings_file: Path) -> Any:
return ClaudeSDKClient(
options=ClaudeAgentOptions(
- model=self.DEFAULT_MODEL,
+ model=resolve_model_id(self.DEFAULT_MODEL), # Resolve via API Profile
system_prompt=system_prompt,
allowed_tools=self.ALLOWED_TOOLS,
max_turns=self.MAX_TURNS,
diff --git a/apps/backend/runners/github/cleanup_pr_worktrees.py b/apps/backend/runners/github/cleanup_pr_worktrees.py
new file mode 100755
index 0000000000..1a40688f9f
--- /dev/null
+++ b/apps/backend/runners/github/cleanup_pr_worktrees.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+"""
+PR Worktree Cleanup Utility
+============================
+
+Command-line tool for managing PR review worktrees.
+
+Usage:
+ python cleanup_pr_worktrees.py --list # List all worktrees
+ python cleanup_pr_worktrees.py --cleanup # Run cleanup policies
+ python cleanup_pr_worktrees.py --cleanup-all # Remove ALL worktrees
+ python cleanup_pr_worktrees.py --stats # Show cleanup statistics
+"""
+
+import argparse
+
+# Load module directly to avoid import issues
+import importlib.util
+import sys
+from pathlib import Path
+
+services_dir = Path(__file__).parent / "services"
+module_path = services_dir / "pr_worktree_manager.py"
+
+spec = importlib.util.spec_from_file_location("pr_worktree_manager", module_path)
+pr_worktree_module = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(pr_worktree_module)
+
+PRWorktreeManager = pr_worktree_module.PRWorktreeManager
+DEFAULT_PR_WORKTREE_MAX_AGE_DAYS = pr_worktree_module.DEFAULT_PR_WORKTREE_MAX_AGE_DAYS
+DEFAULT_MAX_PR_WORKTREES = pr_worktree_module.DEFAULT_MAX_PR_WORKTREES
+_get_max_age_days = pr_worktree_module._get_max_age_days
+_get_max_pr_worktrees = pr_worktree_module._get_max_pr_worktrees
+
+
+def find_project_root() -> Path:
+ """Find the git project root directory."""
+ current = Path.cwd()
+ while current != current.parent:
+ if (current / ".git").exists():
+ return current
+ current = current.parent
+ raise RuntimeError("Not in a git repository")
+
+
+def list_worktrees(manager: PRWorktreeManager) -> None:
+ """List all PR review worktrees."""
+ worktrees = manager.get_worktree_info()
+
+ if not worktrees:
+ print("No PR review worktrees found.")
+ return
+
+ print(f"\nFound {len(worktrees)} PR review worktrees:\n")
+ print(f"{'Directory':<40} {'Age (days)':<12} {'PR':<6}")
+ print("-" * 60)
+
+ for wt in worktrees:
+ pr_str = f"#{wt.pr_number}" if wt.pr_number else "N/A"
+        print(f"{wt.path.name:<40} {wt.age_days:>12.1f} {pr_str:>6}")
+
+ print()
+
+
+def show_stats(manager: PRWorktreeManager) -> None:
+ """Show worktree cleanup statistics."""
+ worktrees = manager.get_worktree_info()
+ registered = manager.get_registered_worktrees()
+ # Use resolved paths for consistent comparison (handles macOS symlinks)
+ registered_resolved = {p.resolve() for p in registered}
+
+ # Get current policy values (may be overridden by env vars)
+ max_age_days = _get_max_age_days()
+ max_worktrees = _get_max_pr_worktrees()
+
+ total = len(worktrees)
+ orphaned = sum(
+ 1 for wt in worktrees if wt.path.resolve() not in registered_resolved
+ )
+ expired = sum(1 for wt in worktrees if wt.age_days > max_age_days)
+ excess = max(0, total - max_worktrees)
+
+ print("\nPR Worktree Statistics:")
+ print(f" Total worktrees: {total}")
+ print(f" Registered with git: {len(registered)}")
+ print(f" Orphaned (not in git): {orphaned}")
+ print(f" Expired (>{max_age_days} days): {expired}")
+ print(f" Excess (>{max_worktrees} limit): {excess}")
+ print()
+ print("Cleanup Policies:")
+ print(f" Max age: {max_age_days} days")
+ print(f" Max count: {max_worktrees} worktrees")
+ print()
+
+
+def cleanup_worktrees(manager: PRWorktreeManager, force: bool = False) -> None:
+ """Run cleanup policies on worktrees."""
+ print("\nRunning PR worktree cleanup...")
+ if force:
+ print("WARNING: Force cleanup - removing ALL worktrees!")
+ count = manager.cleanup_all_worktrees()
+ print(f"Removed {count} worktrees.")
+ else:
+ stats = manager.cleanup_worktrees()
+ if stats["total"] == 0:
+ print("No worktrees needed cleanup.")
+ else:
+ print("\nCleanup complete:")
+ print(f" Orphaned removed: {stats['orphaned']}")
+ print(f" Expired removed: {stats['expired']}")
+ print(f" Excess removed: {stats['excess']}")
+ print(f" Total removed: {stats['total']}")
+ print()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Manage PR review worktrees",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog="""
+Examples:
+ python cleanup_pr_worktrees.py --list
+ python cleanup_pr_worktrees.py --cleanup
+ python cleanup_pr_worktrees.py --stats
+ python cleanup_pr_worktrees.py --cleanup-all
+
+Environment variables:
+ MAX_PR_WORKTREES=10 # Max number of worktrees to keep
+ PR_WORKTREE_MAX_AGE_DAYS=7 # Max age in days before cleanup
+ """,
+ )
+
+ parser.add_argument(
+ "--list", action="store_true", help="List all PR review worktrees"
+ )
+
+ parser.add_argument(
+ "--cleanup",
+ action="store_true",
+ help="Run cleanup policies (remove orphaned, expired, and excess worktrees)",
+ )
+
+ parser.add_argument(
+ "--cleanup-all",
+ action="store_true",
+ help="Remove ALL PR review worktrees (dangerous!)",
+ )
+
+ parser.add_argument("--stats", action="store_true", help="Show cleanup statistics")
+
+ parser.add_argument(
+ "--project-dir",
+ type=Path,
+ help="Project directory (default: auto-detect git root)",
+ )
+
+ args = parser.parse_args()
+
+ # Require at least one action
+ if not any([args.list, args.cleanup, args.cleanup_all, args.stats]):
+ parser.print_help()
+ return 1
+
+ try:
+ # Find project directory
+ if args.project_dir:
+ project_dir = args.project_dir
+ else:
+ project_dir = find_project_root()
+
+ print(f"Project directory: {project_dir}")
+
+ # Create manager
+ manager = PRWorktreeManager(
+ project_dir=project_dir, worktree_dir=".auto-claude/github/pr/worktrees"
+ )
+
+ # Execute actions
+ if args.stats:
+ show_stats(manager)
+
+ if args.list:
+ list_worktrees(manager)
+
+ if args.cleanup:
+ cleanup_worktrees(manager, force=False)
+
+ if args.cleanup_all:
+ response = input(
+ "This will remove ALL PR worktrees. Are you sure? (yes/no): "
+ )
+ if response.lower() == "yes":
+ cleanup_worktrees(manager, force=True)
+ else:
+ print("Aborted.")
+
+ return 0
+
+ except Exception as e:
+ print(f"Error: {e}", file=sys.stderr)
+ return 1
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/apps/backend/runners/github/confidence.py b/apps/backend/runners/github/confidence.py
index 0e21b211eb..70557b922c 100644
--- a/apps/backend/runners/github/confidence.py
+++ b/apps/backend/runners/github/confidence.py
@@ -1,16 +1,18 @@
"""
-Review Confidence Scoring
-=========================
+DEPRECATED: Review Confidence Scoring
+=====================================
-Adds confidence scores to review findings to help users prioritize.
+This module is DEPRECATED and will be removed in a future version.
-Features:
-- Confidence scoring based on pattern matching, historical accuracy
-- Risk assessment (false positive likelihood)
-- Evidence tracking for transparency
-- Calibration based on outcome tracking
+The confidence scoring approach has been replaced with EVIDENCE-BASED VALIDATION:
+- Instead of assigning confidence scores (0-100), findings now require concrete
+ code evidence proving the issue exists.
+- Simple rule: If you can't show the actual problematic code, don't report it.
+- Validation is binary: either the evidence exists in the file or it doesn't.
-Usage:
+For new code, use evidence-based validation in pydantic_models.py and models.py instead.
+
+Legacy Usage (deprecated):
scorer = ConfidenceScorer(learning_tracker=tracker)
# Score a finding
@@ -20,10 +22,24 @@
# Get explanation
print(scorer.explain_confidence(scored))
+
+Migration:
+ - Instead of `confidence: float`, use `evidence: str` with actual code snippets
+ - Instead of filtering by confidence threshold, verify evidence exists in file
+ - See pr_finding_validator.md for the new evidence-based approach
"""
from __future__ import annotations
+import warnings
+
+warnings.warn(
+ "The confidence module is deprecated. Use evidence-based validation instead. "
+ "See models.py 'evidence' field and pr_finding_validator.md for the new approach.",
+ DeprecationWarning,
+ stacklevel=2,
+)
+
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
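To make the migration note concrete, here is a minimal before/after sketch of a finding (field names follow the schema shown in pr_reviewer.md; the values are illustrative):

```python
# Deprecated shape: a confidence score attached to the finding.
old_finding = {
    "id": "finding-1",
    "severity": "critical",
    "category": "security",
    "confidence": 0.95,  # dropped in the new schema
    "title": "SQL Injection vulnerability in user search",
    "file": "src/api/users.ts",
    "line": 42,
}

# Evidence-based shape: no score; the finding must carry the actual code.
new_finding = {
    "id": "finding-1",
    "severity": "critical",
    "category": "security",
    "title": "SQL Injection vulnerability in user search",
    "file": "src/api/users.ts",
    "line": 42,
    "evidence": "const query = `SELECT * FROM users WHERE name LIKE '%${searchTerm}%'`",
}
```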
diff --git a/apps/backend/runners/github/context_gatherer.py b/apps/backend/runners/github/context_gatherer.py
index 0ce48bf5ea..9a3c551261 100644
--- a/apps/backend/runners/github/context_gatherer.py
+++ b/apps/backend/runners/github/context_gatherer.py
@@ -204,6 +204,11 @@ class PRContext:
# Commit SHAs for worktree creation (PR review isolation)
head_sha: str = "" # Commit SHA of PR head (headRefOid)
base_sha: str = "" # Commit SHA of PR base (baseRefOid)
+ # Merge conflict status
+ has_merge_conflicts: bool = False # True if PR has conflicts with base branch
+ merge_state_status: str = (
+ "" # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE
+ )
class PRContextGatherer:
@@ -276,6 +281,17 @@ async def gather(self) -> PRContext:
# Check if diff was truncated (empty diff but files were changed)
diff_truncated = len(diff) == 0 and len(changed_files) > 0
+ # Check merge conflict status
+ mergeable = pr_data.get("mergeable", "UNKNOWN")
+ merge_state_status = pr_data.get("mergeStateStatus", "UNKNOWN")
+ has_merge_conflicts = mergeable == "CONFLICTING"
+
+ if has_merge_conflicts:
+ print(
+ f"[Context] ⚠️ PR has merge conflicts (mergeStateStatus: {merge_state_status})",
+ flush=True,
+ )
+
return PRContext(
pr_number=self.pr_number,
title=pr_data["title"],
@@ -296,6 +312,8 @@ async def gather(self) -> PRContext:
diff_truncated=diff_truncated,
head_sha=pr_data.get("headRefOid", ""),
base_sha=pr_data.get("baseRefOid", ""),
+ has_merge_conflicts=has_merge_conflicts,
+ merge_state_status=merge_state_status,
)
async def _fetch_pr_metadata(self) -> dict:
@@ -317,6 +335,8 @@ async def _fetch_pr_metadata(self) -> dict:
"deletions",
"changedFiles",
"labels",
+ "mergeable", # MERGEABLE, CONFLICTING, or UNKNOWN
+ "mergeStateStatus", # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE
],
)
@@ -1036,28 +1056,56 @@ async def gather(self) -> FollowupReviewContext:
f"[Followup] Comparing {previous_sha[:8]}...{current_sha[:8]}", flush=True
)
- # Get commit comparison
+ # Get PR-scoped files and commits (excludes merge-introduced changes)
+ # This solves the problem where merging develop into a feature branch
+ # would include commits from other PRs in the follow-up review.
+ # Pass reviewed_file_blobs for rebase-resistant comparison
+ reviewed_file_blobs = getattr(self.previous_review, "reviewed_file_blobs", {})
try:
- comparison = await self.gh_client.compare_commits(previous_sha, current_sha)
- except Exception as e:
- print(f"[Followup] Error comparing commits: {e}", flush=True)
- return FollowupReviewContext(
- pr_number=self.pr_number,
- previous_review=self.previous_review,
- previous_commit_sha=previous_sha,
- current_commit_sha=current_sha,
- error=f"Failed to compare commits: {e}",
+ pr_files, new_commits = await self.gh_client.get_pr_files_changed_since(
+ self.pr_number, previous_sha, reviewed_file_blobs=reviewed_file_blobs
)
+ print(
+ f"[Followup] PR has {len(pr_files)} files, "
+ f"{len(new_commits)} commits since last review"
+ + (" (blob comparison used)" if reviewed_file_blobs else ""),
+ flush=True,
+ )
+ except Exception as e:
+ print(f"[Followup] Error getting PR files/commits: {e}", flush=True)
+ # Fallback to compare_commits if PR endpoints fail
+ print("[Followup] Falling back to commit comparison...", flush=True)
+ try:
+ comparison = await self.gh_client.compare_commits(
+ previous_sha, current_sha
+ )
+ new_commits = comparison.get("commits", [])
+ pr_files = comparison.get("files", [])
+ print(
+ f"[Followup] Fallback: Found {len(new_commits)} commits, "
+ f"{len(pr_files)} files (may include merge-introduced changes)",
+ flush=True,
+ )
+ except Exception as e2:
+ print(f"[Followup] Fallback also failed: {e2}", flush=True)
+ return FollowupReviewContext(
+ pr_number=self.pr_number,
+ previous_review=self.previous_review,
+ previous_commit_sha=previous_sha,
+ current_commit_sha=current_sha,
+ error=f"Failed to get PR context: {e}, fallback: {e2}",
+ )
- # Extract data from comparison
- commits = comparison.get("commits", [])
- files = comparison.get("files", [])
+ # Use PR files as the canonical list (excludes files from merged branches)
+ commits = new_commits
+ files = pr_files
print(
f"[Followup] Found {len(commits)} new commits, {len(files)} changed files",
flush=True,
)
# Build diff from file patches
+        # Note: both the PR files endpoint and compare_commits use the 'filename' key, so the loop below works for either source
diff_parts = []
files_changed = []
for file_info in files:
@@ -1139,6 +1187,26 @@ async def gather(self) -> FollowupReviewContext:
flush=True,
)
+ # Fetch current merge conflict status
+ has_merge_conflicts = False
+ merge_state_status = "UNKNOWN"
+ try:
+ pr_status = await self.gh_client.pr_get(
+ self.pr_number,
+ json_fields=["mergeable", "mergeStateStatus"],
+ )
+ mergeable = pr_status.get("mergeable", "UNKNOWN")
+ merge_state_status = pr_status.get("mergeStateStatus", "UNKNOWN")
+ has_merge_conflicts = mergeable == "CONFLICTING"
+
+ if has_merge_conflicts:
+ print(
+ f"[Followup] ⚠️ PR has merge conflicts (mergeStateStatus: {merge_state_status})",
+ flush=True,
+ )
+ except Exception as e:
+ print(f"[Followup] Could not fetch merge status: {e}", flush=True)
+
return FollowupReviewContext(
pr_number=self.pr_number,
previous_review=self.previous_review,
@@ -1151,4 +1219,6 @@ async def gather(self) -> FollowupReviewContext:
+ contributor_reviews,
ai_bot_comments_since_review=ai_comments,
pr_reviews_since_review=pr_reviews,
+ has_merge_conflicts=has_merge_conflicts,
+ merge_state_status=merge_state_status,
)
diff --git a/apps/backend/runners/github/gh_client.py b/apps/backend/runners/github/gh_client.py
index 942aefa2b4..4ade5f913b 100644
--- a/apps/backend/runners/github/gh_client.py
+++ b/apps/backend/runners/github/gh_client.py
@@ -822,14 +822,17 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]:
Returns:
Dict with:
- - checks: List of check runs with name, status, conclusion
+ - checks: List of check runs with name, state
- passing: Number of passing checks
- failing: Number of failing checks
- pending: Number of pending checks
- failed_checks: List of failed check names
"""
try:
- args = ["pr", "checks", str(pr_number), "--json", "name,state,conclusion"]
+ # Note: gh pr checks --json only supports: bucket, completedAt, description,
+ # event, link, name, startedAt, state, workflow
+ # The 'state' field directly contains the result (SUCCESS, FAILURE, PENDING, etc.)
+ args = ["pr", "checks", str(pr_number), "--json", "name,state"]
args = self._add_repo_flag(args)
result = await self.run(args, timeout=30.0)
@@ -842,15 +845,14 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]:
for check in checks:
state = check.get("state", "").upper()
- conclusion = check.get("conclusion", "").upper()
name = check.get("name", "Unknown")
- if state == "COMPLETED":
- if conclusion in ("SUCCESS", "NEUTRAL", "SKIPPED"):
- passing += 1
- elif conclusion in ("FAILURE", "TIMED_OUT", "CANCELLED"):
- failing += 1
- failed_checks.append(name)
+ # gh pr checks 'state' directly contains: SUCCESS, FAILURE, PENDING, NEUTRAL, etc.
+ if state in ("SUCCESS", "NEUTRAL", "SKIPPED"):
+ passing += 1
+ elif state in ("FAILURE", "TIMED_OUT", "CANCELLED", "STARTUP_FAILURE"):
+ failing += 1
+ failed_checks.append(name)
else:
# PENDING, QUEUED, IN_PROGRESS, etc.
pending += 1
@@ -872,3 +874,336 @@ async def get_pr_checks(self, pr_number: int) -> dict[str, Any]:
"failed_checks": [],
"error": str(e),
}
+
+ async def get_workflows_awaiting_approval(self, pr_number: int) -> dict[str, Any]:
+ """
+ Get workflow runs awaiting approval for a PR from a fork.
+
+ Workflows from forked repositories require manual approval before running.
+ These are NOT included in `gh pr checks` and must be queried separately.
+
+ Args:
+ pr_number: PR number
+
+ Returns:
+ Dict with:
+ - awaiting_approval: Number of workflows waiting for approval
+ - workflow_runs: List of workflow runs with id, name, html_url
+ - can_approve: Whether this token can approve workflows
+ """
+ try:
+ # First, get the PR's head SHA to filter workflow runs
+ pr_args = ["pr", "view", str(pr_number), "--json", "headRefOid"]
+ pr_args = self._add_repo_flag(pr_args)
+ pr_result = await self.run(pr_args, timeout=30.0)
+ pr_data = json.loads(pr_result.stdout) if pr_result.stdout.strip() else {}
+ head_sha = pr_data.get("headRefOid", "")
+
+ if not head_sha:
+ return {
+ "awaiting_approval": 0,
+ "workflow_runs": [],
+ "can_approve": False,
+ }
+
+ # Query workflow runs with action_required status
+ # Note: We need to use the API endpoint as gh CLI doesn't have direct support
+ endpoint = (
+ "repos/{owner}/{repo}/actions/runs?status=action_required&per_page=100"
+ )
+ args = ["api", "--method", "GET", endpoint]
+
+ result = await self.run(args, timeout=30.0)
+ data = json.loads(result.stdout) if result.stdout.strip() else {}
+ all_runs = data.get("workflow_runs", [])
+
+ # Filter to only runs for this PR's head SHA
+ pr_runs = [
+ {
+ "id": run.get("id"),
+ "name": run.get("name"),
+ "html_url": run.get("html_url"),
+ "workflow_name": run.get("workflow", {}).get("name", "Unknown"),
+ }
+ for run in all_runs
+ if run.get("head_sha") == head_sha
+ ]
+
+ return {
+ "awaiting_approval": len(pr_runs),
+ "workflow_runs": pr_runs,
+ "can_approve": True, # Assume token has permission, will fail if not
+ }
+ except (GHCommandError, GHTimeoutError, json.JSONDecodeError) as e:
+ logger.warning(
+ f"Failed to get workflows awaiting approval for #{pr_number}: {e}"
+ )
+ return {
+ "awaiting_approval": 0,
+ "workflow_runs": [],
+ "can_approve": False,
+ "error": str(e),
+ }
+
+ async def approve_workflow_run(self, run_id: int) -> bool:
+ """
+ Approve a workflow run that's waiting for approval (from a fork).
+
+ Args:
+ run_id: The workflow run ID to approve
+
+ Returns:
+ True if approval succeeded, False otherwise
+ """
+ try:
+ endpoint = f"repos/{{owner}}/{{repo}}/actions/runs/{run_id}/approve"
+ args = ["api", "--method", "POST", endpoint]
+
+ await self.run(args, timeout=30.0)
+ logger.info(f"Approved workflow run {run_id}")
+ return True
+ except (GHCommandError, GHTimeoutError) as e:
+ logger.warning(f"Failed to approve workflow run {run_id}: {e}")
+ return False
+
+ async def get_pr_checks_comprehensive(self, pr_number: int) -> dict[str, Any]:
+ """
+ Get comprehensive CI status including workflows awaiting approval.
+
+ This combines:
+ - Standard check runs from `gh pr checks`
+ - Workflows awaiting approval (for fork PRs)
+
+ Args:
+ pr_number: PR number
+
+ Returns:
+ Dict with all check information including awaiting_approval count
+ """
+ # Get standard checks
+ checks = await self.get_pr_checks(pr_number)
+
+ # Get workflows awaiting approval
+ awaiting = await self.get_workflows_awaiting_approval(pr_number)
+
+ # Merge the results
+ checks["awaiting_approval"] = awaiting.get("awaiting_approval", 0)
+ checks["awaiting_workflow_runs"] = awaiting.get("workflow_runs", [])
+
+ # Update pending count to include awaiting approval
+ checks["pending"] = checks.get("pending", 0) + awaiting.get(
+ "awaiting_approval", 0
+ )
+
+ return checks
+
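For reference, the merged dict combines the counters from get_pr_checks with the two fields added here; an illustrative value (all numbers and the run entry are invented):

checks = {
    "passing": 4,
    "failing": 1,
    "pending": 3,  # includes the 2 runs still awaiting approval
    "failed_checks": ["lint"],
    "awaiting_approval": 2,
    "awaiting_workflow_runs": [
        {"id": 123, "name": "CI", "html_url": "https://github.com/...", "workflow_name": "CI"},
    ],
}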
+ async def get_pr_files(self, pr_number: int) -> list[dict[str, Any]]:
+ """
+ Get files changed by a PR using the PR files endpoint.
+
+ IMPORTANT: This returns only files that are part of the PR's actual changes,
+ NOT files that came in from merging another branch (e.g., develop).
+ This is crucial for follow-up reviews to avoid reviewing code from other PRs.
+
+ Uses: GET /repos/{owner}/{repo}/pulls/{pr_number}/files
+
+ Args:
+ pr_number: PR number
+
+ Returns:
+ List of file objects with:
+ - filename: Path to the file
+ - status: added, removed, modified, renamed, copied, changed
+ - additions: Number of lines added
+ - deletions: Number of lines deleted
+ - changes: Total number of line changes
+ - patch: The unified diff patch for this file (may be absent for large files)
+ """
+ files = []
+ page = 1
+ per_page = 100
+
+ while True:
+ endpoint = f"repos/{{owner}}/{{repo}}/pulls/{pr_number}/files?page={page}&per_page={per_page}"
+ args = ["api", "--method", "GET", endpoint]
+
+ result = await self.run(args, timeout=60.0)
+ page_files = json.loads(result.stdout) if result.stdout.strip() else []
+
+ if not page_files:
+ break
+
+ files.extend(page_files)
+
+ # Check if we got a full page (more pages might exist)
+ if len(page_files) < per_page:
+ break
+
+ page += 1
+
+ # Safety limit to prevent infinite loops
+ if page > 50:
+ logger.warning(
+ f"PR #{pr_number} has more than 5000 files, stopping pagination"
+ )
+ break
+
+ return files
+
+ async def get_pr_commits(self, pr_number: int) -> list[dict[str, Any]]:
+ """
+ Get commits that are part of a PR using the PR commits endpoint.
+
+ IMPORTANT: This returns only commits that are part of the PR's branch,
+ NOT commits that came in from merging another branch (e.g., develop).
+ This is crucial for follow-up reviews to avoid reviewing commits from other PRs.
+
+ Uses: GET /repos/{owner}/{repo}/pulls/{pr_number}/commits
+
+ Args:
+ pr_number: PR number
+
+ Returns:
+ List of commit objects with:
+ - sha: Commit SHA
+ - commit: Object with message, author, committer info
+ - author: GitHub user who authored the commit
+ - committer: GitHub user who committed
+ - parents: List of parent commit SHAs
+ """
+ commits = []
+ page = 1
+ per_page = 100
+
+ while True:
+ endpoint = f"repos/{{owner}}/{{repo}}/pulls/{pr_number}/commits?page={page}&per_page={per_page}"
+ args = ["api", "--method", "GET", endpoint]
+
+ result = await self.run(args, timeout=60.0)
+ page_commits = json.loads(result.stdout) if result.stdout.strip() else []
+
+ if not page_commits:
+ break
+
+ commits.extend(page_commits)
+
+ # Check if we got a full page (more pages might exist)
+ if len(page_commits) < per_page:
+ break
+
+ page += 1
+
+ # Safety limit
+ if page > 10:
+ logger.warning(
+ f"PR #{pr_number} has more than 1000 commits, stopping pagination"
+ )
+ break
+
+ return commits
+
+ async def get_pr_files_changed_since(
+ self,
+ pr_number: int,
+ base_sha: str,
+ reviewed_file_blobs: dict[str, str] | None = None,
+ ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
+ """
+ Get files and commits that are part of the PR and changed since a specific commit.
+
+ This method solves the "merge introduced commits" problem by:
+ 1. Getting the canonical list of PR files (excludes files from merged branches)
+ 2. Getting the canonical list of PR commits (excludes commits from merged branches)
+ 3. Filtering to only include commits after base_sha
+
+ When a rebase/force-push is detected (base_sha not found in commits), and
+ reviewed_file_blobs is provided, uses blob SHA comparison to identify which
+ files actually changed content. This prevents re-reviewing unchanged files.
+
+ Args:
+ pr_number: PR number
+ base_sha: The commit SHA to compare from (e.g., last reviewed commit)
+ reviewed_file_blobs: Optional dict mapping filename -> blob SHA from the
+ previous review. Used as fallback when base_sha is not found (rebase).
+
+ Returns:
+ Tuple of:
+ - List of file objects that are part of the PR (filtered if blob comparison used)
+ - List of commit objects that are part of the PR and after base_sha.
+ NOTE: Returns empty list if rebase/force-push detected, since commit SHAs
+ are rewritten and we cannot determine which commits are truly "new".
+ """
+ # Get PR's canonical files (these are the actual PR changes)
+ pr_files = await self.get_pr_files(pr_number)
+
+ # Get PR's canonical commits
+ pr_commits = await self.get_pr_commits(pr_number)
+
+ # Find the position of base_sha in PR commits
+ # Use minimum 7-char prefix comparison (git's default short SHA length)
+ base_index = -1
+ min_prefix_len = 7
+ base_prefix = (
+ base_sha[:min_prefix_len] if len(base_sha) >= min_prefix_len else base_sha
+ )
+ for i, commit in enumerate(pr_commits):
+ commit_prefix = commit["sha"][:min_prefix_len]
+ if commit_prefix == base_prefix:
+ base_index = i
+ break
+
+ # Commits after base_sha (these are the new commits to review)
+ if base_index >= 0:
+ new_commits = pr_commits[base_index + 1 :]
+ return pr_files, new_commits
+
+ # base_sha not found in PR commits - this happens when:
+ # 1. The base_sha was from a merge commit (not a direct PR commit)
+ # 2. The PR was rebased/force-pushed
+ logger.warning(
+ f"base_sha {base_sha[:8]} not found in PR #{pr_number} commits. "
+ "PR was likely rebased or force-pushed."
+ )
+
+ # If we have blob SHAs from the previous review, use them to filter files
+ # Blob SHAs persist across rebases - same content = same blob SHA
+ if reviewed_file_blobs: # Only use blob comparison if we have actual blob data
+ changed_files = []
+ unchanged_count = 0
+ for file in pr_files:
+ filename = file.get("filename", "")
+ current_blob_sha = file.get("sha", "")
+ file_status = file.get("status", "")
+ previous_blob_sha = reviewed_file_blobs.get(filename, "")
+
+ # Always include files that were added, removed, or renamed
+ # These are significant changes regardless of blob SHA
+ if file_status in ("added", "removed", "renamed"):
+ changed_files.append(file)
+ elif not previous_blob_sha:
+ # File wasn't in previous review - include it
+ changed_files.append(file)
+ elif current_blob_sha != previous_blob_sha:
+ # File content changed - include it
+ changed_files.append(file)
+ else:
+ # Same blob SHA = same content - skip it
+ unchanged_count += 1
+
+ if unchanged_count > 0:
+ logger.info(
+ f"Blob comparison: {len(changed_files)} files changed, "
+ f"{unchanged_count} unchanged (skipped)"
+ )
+
+ # Return filtered files but empty commits list (can't determine "new" commits after rebase)
+ # After a rebase, all commit SHAs are rewritten so we can't identify which are truly new.
+ # The file changes via blob comparison are the reliable source of what changed.
+ return changed_files, []
+
+ # No blob data available - return all files but empty commits (can't determine new commits)
+ logger.warning(
+ "No reviewed_file_blobs available for blob comparison after rebase. "
+ "Returning all PR files with empty commits list."
+ )
+ return pr_files, []
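To make the rebase fallback concrete, here is a small illustration of the filtering rule above (file dicts abbreviated, blob SHAs invented):

reviewed_file_blobs = {"app.py": "aaa111", "util.py": "bbb222"}
pr_files = [
    {"filename": "app.py", "sha": "aaa111", "status": "modified"},   # same blob -> skipped
    {"filename": "util.py", "sha": "ccc333", "status": "modified"},  # blob differs -> included
    {"filename": "new.py", "sha": "ddd444", "status": "added"},      # added -> always included
]
# After a rebase, get_pr_files_changed_since would return the util.py and new.py
# entries plus an empty commit list, because rewritten commit SHAs cannot be used
# to tell which commits are genuinely new.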
diff --git a/apps/backend/runners/github/models.py b/apps/backend/runners/github/models.py
index cb7dbe22e9..d4c4a90cf2 100644
--- a/apps/backend/runners/github/models.py
+++ b/apps/backend/runners/github/models.py
@@ -65,6 +65,17 @@ class MergeVerdict(str, Enum):
BLOCKED = "blocked" # Critical issues, cannot merge
+# Constants for branch-behind messaging (DRY - used across multiple reviewers)
+BRANCH_BEHIND_BLOCKER_MSG = (
+ "Branch Out of Date: PR branch is behind the base branch and needs to be updated"
+)
+BRANCH_BEHIND_REASONING = (
+    "Branch is out of date with the base branch. Update the branch first; "
+    "if no conflicts arise, you can merge. If conflicts do arise, "
+    "resolve them and run the follow-up review again."
+)
+
+
class AICommentVerdict(str, Enum):
"""Verdict on AI tool comments (CodeRabbit, Cursor, Greptile, etc.)."""
@@ -214,19 +225,18 @@ class PRReviewFinding:
end_line: int | None = None
suggested_fix: str | None = None
fixable: bool = False
- # NEW: Support for verification and redundancy detection
- confidence: float = 0.85 # AI's confidence in this finding (0.0-1.0)
+ # Evidence-based validation: actual code proving the issue exists
+ evidence: str | None = None # Actual code snippet showing the issue
verification_note: str | None = (
None # What evidence is missing or couldn't be verified
)
redundant_with: str | None = None # Reference to duplicate code (file:line)
- # NEW: Finding validation fields (from finding-validator re-investigation)
+ # Finding validation fields (from finding-validator re-investigation)
validation_status: str | None = (
None # confirmed_valid, dismissed_false_positive, needs_human_review
)
validation_evidence: str | None = None # Code snippet examined during validation
- validation_confidence: float | None = None # Confidence of validation (0.0-1.0)
validation_explanation: str | None = None # Why finding was validated/dismissed
def to_dict(self) -> dict:
@@ -241,14 +251,13 @@ def to_dict(self) -> dict:
"end_line": self.end_line,
"suggested_fix": self.suggested_fix,
"fixable": self.fixable,
- # NEW fields
- "confidence": self.confidence,
+ # Evidence-based validation fields
+ "evidence": self.evidence,
"verification_note": self.verification_note,
"redundant_with": self.redundant_with,
# Validation fields
"validation_status": self.validation_status,
"validation_evidence": self.validation_evidence,
- "validation_confidence": self.validation_confidence,
"validation_explanation": self.validation_explanation,
}
@@ -265,14 +274,13 @@ def from_dict(cls, data: dict) -> PRReviewFinding:
end_line=data.get("end_line"),
suggested_fix=data.get("suggested_fix"),
fixable=data.get("fixable", False),
- # NEW fields
- confidence=data.get("confidence", 0.85),
+ # Evidence-based validation fields
+ evidence=data.get("evidence"),
verification_note=data.get("verification_note"),
redundant_with=data.get("redundant_with"),
# Validation fields
validation_status=data.get("validation_status"),
validation_evidence=data.get("validation_evidence"),
- validation_confidence=data.get("validation_confidence"),
validation_explanation=data.get("validation_explanation"),
)
@@ -383,6 +391,9 @@ class PRReviewResult:
# Follow-up review tracking
reviewed_commit_sha: str | None = None # HEAD SHA at time of review
+ reviewed_file_blobs: dict[str, str] = field(
+ default_factory=dict
+ ) # filename → blob SHA at time of review (survives rebases)
is_followup_review: bool = False # True if this is a follow-up review
previous_review_id: int | None = None # Reference to the review this follows up on
resolved_findings: list[str] = field(default_factory=list) # Finding IDs now fixed
@@ -421,6 +432,7 @@ def to_dict(self) -> dict:
"quick_scan_summary": self.quick_scan_summary,
# Follow-up review fields
"reviewed_commit_sha": self.reviewed_commit_sha,
+ "reviewed_file_blobs": self.reviewed_file_blobs,
"is_followup_review": self.is_followup_review,
"previous_review_id": self.previous_review_id,
"resolved_findings": self.resolved_findings,
@@ -465,6 +477,7 @@ def from_dict(cls, data: dict) -> PRReviewResult:
quick_scan_summary=data.get("quick_scan_summary", {}),
# Follow-up review fields
reviewed_commit_sha=data.get("reviewed_commit_sha"),
+ reviewed_file_blobs=data.get("reviewed_file_blobs", {}),
is_followup_review=data.get("is_followup_review", False),
previous_review_id=data.get("previous_review_id"),
resolved_findings=data.get("resolved_findings", []),
@@ -562,6 +575,16 @@ class FollowupReviewContext:
# These are different from comments - they're full review submissions with body text
pr_reviews_since_review: list[dict] = field(default_factory=list)
+ # Merge conflict status
+ has_merge_conflicts: bool = False # True if PR has conflicts with base branch
+ merge_state_status: str = (
+ "" # BEHIND, BLOCKED, CLEAN, DIRTY, HAS_HOOKS, UNKNOWN, UNSTABLE
+ )
+
+ # CI status - passed to AI orchestrator so it can factor into verdict
+ # Dict with: passing, failing, pending, failed_checks, awaiting_approval
+ ci_status: dict = field(default_factory=dict)
+
# Error flag - if set, context gathering failed and data may be incomplete
error: str | None = None
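An illustrative value for the new ci_status field, matching the keys listed in the comment above (the `followup_context` variable and all numbers are hypothetical):

followup_context.ci_status = {
    "passing": 6,
    "failing": 0,
    "pending": 1,
    "failed_checks": [],
    "awaiting_approval": 1,
}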
diff --git a/apps/backend/runners/github/orchestrator.py b/apps/backend/runners/github/orchestrator.py
index 0cfb078efe..22d3e144f6 100644
--- a/apps/backend/runners/github/orchestrator.py
+++ b/apps/backend/runners/github/orchestrator.py
@@ -24,6 +24,8 @@
from .context_gatherer import PRContext, PRContextGatherer
from .gh_client import GHClient
from .models import (
+ BRANCH_BEHIND_BLOCKER_MSG,
+ BRANCH_BEHIND_REASONING,
AICommentTriage,
AICommentVerdict,
AutoFixState,
@@ -50,6 +52,8 @@
from context_gatherer import PRContext, PRContextGatherer
from gh_client import GHClient
from models import (
+ BRANCH_BEHIND_BLOCKER_MSG,
+ BRANCH_BEHIND_REASONING,
AICommentTriage,
AICommentVerdict,
AutoFixState,
@@ -389,17 +393,38 @@ async def review_pr(
pr_number=pr_number,
)
- # Check CI status
- ci_status = await self.gh_client.get_pr_checks(pr_number)
+ # Check CI status (comprehensive - includes workflows awaiting approval)
+ ci_status = await self.gh_client.get_pr_checks_comprehensive(pr_number)
+
+ # Log CI status with awaiting approval info
+ awaiting = ci_status.get("awaiting_approval", 0)
+ pending_without_awaiting = ci_status.get("pending", 0) - awaiting
+ ci_log_parts = [
+ f"{ci_status.get('passing', 0)} passing",
+ f"{ci_status.get('failing', 0)} failing",
+ ]
+ if pending_without_awaiting > 0:
+ ci_log_parts.append(f"{pending_without_awaiting} pending")
+ if awaiting > 0:
+ ci_log_parts.append(f"{awaiting} awaiting approval")
print(
- f"[DEBUG orchestrator] CI status: {ci_status.get('passing', 0)} passing, "
- f"{ci_status.get('failing', 0)} failing, {ci_status.get('pending', 0)} pending",
+ f"[orchestrator] CI status: {', '.join(ci_log_parts)}",
flush=True,
)
+ if awaiting > 0:
+ print(
+ f"[orchestrator] ⚠️ {awaiting} workflow(s) from fork need maintainer approval to run",
+ flush=True,
+ )
- # Generate verdict (now includes CI status)
+ # Generate verdict (includes CI status and merge conflict check)
verdict, verdict_reasoning, blockers = self._generate_verdict(
- findings, structural_issues, ai_triages, ci_status
+ findings,
+ structural_issues,
+ ai_triages,
+ ci_status,
+ has_merge_conflicts=pr_context.has_merge_conflicts,
+ merge_state_status=pr_context.merge_state_status,
)
print(
f"[DEBUG orchestrator] Verdict: {verdict.value} - {verdict_reasoning}",
@@ -430,11 +455,31 @@ async def review_pr(
structural_issues=structural_issues,
ai_triages=ai_triages,
risk_assessment=risk_assessment,
+ ci_status=ci_status,
)
# Get HEAD SHA for follow-up review tracking
head_sha = self.bot_detector.get_last_commit_sha(pr_context.commits)
+ # Get file blob SHAs for rebase-resistant follow-up reviews
+ # Blob SHAs persist across rebases - same content = same blob SHA
+ file_blobs: dict[str, str] = {}
+ try:
+ pr_files = await self.gh_client.get_pr_files(pr_number)
+ for file in pr_files:
+ filename = file.get("filename", "")
+ blob_sha = file.get("sha", "")
+ if filename and blob_sha:
+ file_blobs[filename] = blob_sha
+ print(
+ f"[Review] Captured {len(file_blobs)} file blob SHAs for follow-up tracking",
+ flush=True,
+ )
+ except Exception as e:
+ print(
+ f"[Review] Warning: Could not capture file blobs: {e}", flush=True
+ )
+
# Create result
result = PRReviewResult(
pr_number=pr_number,
@@ -452,6 +497,8 @@ async def review_pr(
quick_scan_summary=quick_scan,
# Track the commit SHA for follow-up reviews
reviewed_commit_sha=head_sha,
+ # Track file blobs for rebase-resistant follow-up reviews
+ reviewed_file_blobs=file_blobs,
)
# Post review if configured
@@ -479,6 +526,9 @@ async def review_pr(
# Save result
await result.save(self.github_dir)
+ # Note: PR review memory is now saved by the Electron app after the review completes
+ # This ensures memory is saved to the embedded LadybugDB managed by the app
+
# Mark as reviewed (head_sha already fetched above)
if head_sha:
self.bot_detector.mark_reviewed(pr_number, head_sha)
@@ -594,19 +644,29 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult:
await result.save(self.github_dir)
return result
- # Check if there are new commits
- if not followup_context.commits_since_review:
+ # Check if there are changes to review (commits OR files via blob comparison)
+ # After a rebase/force-push, commits_since_review will be empty (commit
+ # SHAs are rewritten), but files_changed_since_review will contain files
+ # that actually changed content based on blob SHA comparison.
+ has_commits = bool(followup_context.commits_since_review)
+ has_file_changes = bool(followup_context.files_changed_since_review)
+
+ if not has_commits and not has_file_changes:
+ base_sha = previous_review.reviewed_commit_sha[:8]
print(
- f"[Followup] No new commits since last review at {previous_review.reviewed_commit_sha[:8]}",
+ f"[Followup] No changes since last review at {base_sha}",
flush=True,
)
# Return a result indicating no changes
+ no_change_summary = (
+                "No changes since last review. Previous findings still apply."
+ )
result = PRReviewResult(
pr_number=pr_number,
repo=self.config.repo,
success=True,
findings=previous_review.findings,
- summary="No new commits since last review. Previous findings still apply.",
+ summary=no_change_summary,
overall_status=previous_review.overall_status,
verdict=previous_review.verdict,
verdict_reasoning="No changes since last review.",
@@ -618,13 +678,26 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult:
await result.save(self.github_dir)
return result
+ # Build progress message based on what changed
+ if has_commits:
+ num_commits = len(followup_context.commits_since_review)
+ change_desc = f"{num_commits} new commits"
+ else:
+ # Rebase detected - files changed but no trackable commits
+ num_files = len(followup_context.files_changed_since_review)
+ change_desc = f"{num_files} files (rebase detected)"
+
self._report_progress(
"analyzing",
30,
- f"Analyzing {len(followup_context.commits_since_review)} new commits...",
+ f"Analyzing {change_desc}...",
pr_number=pr_number,
)
+ # Fetch CI status BEFORE calling reviewer so AI can factor it into verdict
+ ci_status = await self.gh_client.get_pr_checks_comprehensive(pr_number)
+ followup_context.ci_status = ci_status
+
# Use parallel orchestrator for follow-up if enabled
if self.config.use_parallel_orchestrator:
print(
@@ -669,9 +742,9 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult:
)
result = await reviewer.review_followup(followup_context)
- # Check CI status and override verdict if failing
- ci_status = await self.gh_client.get_pr_checks(pr_number)
- failed_checks = ci_status.get("failed_checks", [])
+ # Fallback: ensure CI failures block merge even if AI didn't factor it in
+ # (CI status was already passed to AI via followup_context.ci_status)
+ failed_checks = followup_context.ci_status.get("failed_checks", [])
if failed_checks:
print(
f"[Followup] CI checks failing: {failed_checks}",
@@ -703,6 +776,9 @@ async def followup_review_pr(self, pr_number: int) -> PRReviewResult:
# Save result
await result.save(self.github_dir)
+ # Note: PR review memory is now saved by the Electron app after the review completes
+ # This ensures memory is saved to the embedded LadybugDB managed by the app
+
# Mark as reviewed with new commit SHA
if result.reviewed_commit_sha:
self.bot_detector.mark_reviewed(pr_number, result.reviewed_commit_sha)
@@ -730,15 +806,33 @@ def _generate_verdict(
structural_issues: list[StructuralIssue],
ai_triages: list[AICommentTriage],
ci_status: dict | None = None,
+ has_merge_conflicts: bool = False,
+ merge_state_status: str = "",
) -> tuple[MergeVerdict, str, list[str]]:
"""
- Generate merge verdict based on all findings and CI status.
+ Generate merge verdict based on all findings, CI status, and merge conflicts.
+
+ Blocks on:
+ - Merge conflicts (must be resolved before merging)
+ - Verification failures
+ - Redundancy issues
+ - Failing CI checks
- NEW: Strengthened to block on verification failures, redundancy issues,
- and failing CI checks.
+ Warns on (NEEDS_REVISION):
+ - Branch behind base (out of date)
"""
blockers = []
ci_status = ci_status or {}
+ is_branch_behind = merge_state_status == "BEHIND"
+
+ # CRITICAL: Merge conflicts block merging - check first
+ if has_merge_conflicts:
+ blockers.append(
+ "Merge Conflicts: PR has conflicts with base branch that must be resolved"
+ )
+ # Branch behind base is a warning, not a hard blocker
+ elif is_branch_behind:
+ blockers.append(BRANCH_BEHIND_BLOCKER_MSG)
# Count by severity
critical = [f for f in findings if f.severity == ReviewSeverity.CRITICAL]
@@ -780,6 +874,13 @@ def _generate_verdict(
for check_name in failed_checks:
blockers.append(f"CI Failed: {check_name}")
+ # Workflows awaiting approval block merging (fork PRs)
+ awaiting_approval = ci_status.get("awaiting_approval", 0)
+ if awaiting_approval > 0:
+ blockers.append(
+ f"Workflows Pending: {awaiting_approval} workflow(s) awaiting maintainer approval"
+ )
+
# NEW: Verification failures block merging
for f in verification_failures:
note = f" - {f.verification_note}" if f.verification_note else ""
@@ -812,15 +913,29 @@ def _generate_verdict(
)
blockers.append(f"{t.tool_name}: {summary}")
- # Determine verdict with CI, verification and redundancy checks
+ # Determine verdict with merge conflicts, CI, verification and redundancy checks
if blockers:
+ # Merge conflicts are the highest priority blocker
+ if has_merge_conflicts:
+ verdict = MergeVerdict.BLOCKED
+ reasoning = (
+ "Blocked: PR has merge conflicts with base branch. "
+ "Resolve conflicts before merge."
+ )
# CI failures are always blockers
- if failed_checks:
+ elif failed_checks:
verdict = MergeVerdict.BLOCKED
reasoning = (
f"Blocked: {len(failed_checks)} CI check(s) failing. "
"Fix CI before merge."
)
+ # Workflows awaiting approval block merging
+ elif awaiting_approval > 0:
+ verdict = MergeVerdict.BLOCKED
+ reasoning = (
+ f"Blocked: {awaiting_approval} workflow(s) awaiting approval. "
+ "Approve workflows on GitHub to run CI checks."
+ )
# NEW: Prioritize verification failures
elif verification_failures:
verdict = MergeVerdict.BLOCKED
@@ -842,6 +957,12 @@ def _generate_verdict(
elif len(critical) > 0:
verdict = MergeVerdict.BLOCKED
reasoning = f"Blocked by {len(critical)} critical issues"
+ # Branch behind is a soft blocker - NEEDS_REVISION, not BLOCKED
+ elif is_branch_behind:
+ verdict = MergeVerdict.NEEDS_REVISION
+ reasoning = BRANCH_BEHIND_REASONING
+ if low:
+ reasoning += f" {len(low)} non-blocking suggestion(s) to consider."
else:
verdict = MergeVerdict.NEEDS_REVISION
reasoning = f"{len(blockers)} issues must be addressed"
@@ -925,6 +1046,7 @@ def _generate_enhanced_summary(
structural_issues: list[StructuralIssue],
ai_triages: list[AICommentTriage],
risk_assessment: dict,
+ ci_status: dict | None = None,
) -> str:
"""Generate enhanced summary with verdict, risk, and actionable next steps."""
verdict_emoji = {
@@ -934,8 +1056,19 @@ def _generate_enhanced_summary(
MergeVerdict.BLOCKED: "🔴",
}
+ # Generate bottom line for quick scanning
+ bottom_line = self._generate_bottom_line(
+ verdict=verdict,
+ ci_status=ci_status,
+ blockers=blockers,
+ findings=findings,
+ )
+
lines = [
f"### Merge Verdict: {verdict_emoji.get(verdict, '⚪')} {verdict.value.upper().replace('_', ' ')}",
+ "",
+ f"> {bottom_line}",
+ "",
verdict_reasoning,
"",
"### Risk Assessment",
@@ -1002,6 +1135,70 @@ def _generate_enhanced_summary(
return "\n".join(lines)
+ def _generate_bottom_line(
+ self,
+ verdict: MergeVerdict,
+ ci_status: dict | None,
+ blockers: list[str],
+ findings: list[PRReviewFinding],
+ ) -> str:
+ """Generate a one-line summary for quick scanning at the top of the review."""
+ # Check CI status
+ ci = ci_status or {}
+ pending_ci = ci.get("pending", 0)
+ failing_ci = ci.get("failing", 0)
+ awaiting_approval = ci.get("awaiting_approval", 0)
+
+ # Count blocking findings and issues
+ blocking_findings = [
+ f for f in findings if f.severity.value in ("critical", "high", "medium")
+ ]
+ code_blockers = [
+ b for b in blockers if "CI" not in b and "Merge Conflict" not in b
+ ]
+ has_merge_conflicts = any("Merge Conflict" in b for b in blockers)
+
+ # Determine the bottom line based on verdict and context
+ if verdict == MergeVerdict.READY_TO_MERGE:
+ return (
+ "**✅ Ready to merge** - All checks passing, no blocking issues found."
+ )
+
+ elif verdict == MergeVerdict.BLOCKED:
+ if has_merge_conflicts:
+ return "**🔴 Blocked** - Merge conflicts must be resolved before merge."
+ elif failing_ci > 0:
+ return f"**🔴 Blocked** - {failing_ci} CI check(s) failing. Fix CI before merge."
+ elif awaiting_approval > 0:
+                return "**🔴 Blocked** - Awaiting maintainer approval for fork PR workflow(s)."
+ elif blocking_findings:
+ return f"**🔴 Blocked** - {len(blocking_findings)} critical/high/medium issue(s) must be fixed."
+ else:
+ return "**🔴 Blocked** - Critical issues must be resolved before merge."
+
+ elif verdict == MergeVerdict.NEEDS_REVISION:
+ # Key insight: distinguish "waiting on CI" from "needs code fixes"
+ # Check code issues FIRST before checking pending CI
+ if blocking_findings:
+ return f"**🟠 Needs revision** - {len(blocking_findings)} issue(s) require attention."
+ elif code_blockers:
+ return f"**🟠 Needs revision** - {len(code_blockers)} structural/other issue(s) require attention."
+ elif pending_ci > 0:
+ # Only show "Ready once CI passes" when no code issues exist
+ return f"**⏳ Ready once CI passes** - {pending_ci} check(s) pending, no blocking code issues."
+ else:
+ return "**🟠 Needs revision** - See details below."
+
+ elif verdict == MergeVerdict.MERGE_WITH_CHANGES:
+ if pending_ci > 0:
+ return (
+ "**🟡 Can merge once CI passes** - Minor suggestions, no blockers."
+ )
+ else:
+ return "**🟡 Can merge** - Minor suggestions noted, no blockers."
+
+ return "**📝 Review complete** - See details below."
+
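An illustrative call showing how this method separates "waiting on CI" from "needs code fixes" (assuming `orchestrator` is an instance of this class; inputs are hypothetical):

line = orchestrator._generate_bottom_line(
    verdict=MergeVerdict.NEEDS_REVISION,
    ci_status={"pending": 2, "failing": 0, "awaiting_approval": 0},
    blockers=[],
    findings=[],
)
# -> "**⏳ Ready once CI passes** - 2 check(s) pending, no blocking code issues."
# With three medium-severity findings present instead, the same verdict yields
# "**🟠 Needs revision** - 3 issue(s) require attention."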
def _format_review_body(self, result: PRReviewResult) -> str:
"""Format the review body for posting to GitHub."""
return result.summary
diff --git a/apps/backend/runners/github/runner.py b/apps/backend/runners/github/runner.py
index 669030e46f..b3934cdc93 100644
--- a/apps/backend/runners/github/runner.py
+++ b/apps/backend/runners/github/runner.py
@@ -56,8 +56,10 @@
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
-# Load .env file
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
env_file = Path(__file__).parent.parent.parent / ".env"
if env_file.exists():
diff --git a/apps/backend/runners/github/services/followup_reviewer.py b/apps/backend/runners/github/services/followup_reviewer.py
index 8b8a24181d..5c1c8bbca0 100644
--- a/apps/backend/runners/github/services/followup_reviewer.py
+++ b/apps/backend/runners/github/services/followup_reviewer.py
@@ -26,6 +26,7 @@
from ..models import FollowupReviewContext, GitHubRunnerConfig
try:
+ from ..gh_client import GHClient
from ..models import (
MergeVerdict,
PRReviewFinding,
@@ -37,6 +38,7 @@
from .prompt_manager import PromptManager
from .pydantic_models import FollowupReviewResponse
except (ImportError, ValueError, SystemError):
+ from gh_client import GHClient
from models import (
MergeVerdict,
PRReviewFinding,
@@ -230,6 +232,27 @@ async def review_followup(
"complete", 100, "Follow-up review complete!", context.pr_number
)
+ # Get file blob SHAs for rebase-resistant follow-up reviews
+ # Blob SHAs persist across rebases - same content = same blob SHA
+ file_blobs: dict[str, str] = {}
+ try:
+ gh_client = GHClient(
+ project_dir=self.project_dir,
+ default_timeout=30.0,
+ repo=self.config.repo,
+ )
+ pr_files = await gh_client.get_pr_files(context.pr_number)
+ for file in pr_files:
+ filename = file.get("filename", "")
+ blob_sha = file.get("sha", "")
+ if filename and blob_sha:
+ file_blobs[filename] = blob_sha
+ logger.info(
+ f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking"
+ )
+ except Exception as e:
+ logger.warning(f"Could not capture file blobs: {e}")
+
return PRReviewResult(
pr_number=context.pr_number,
repo=self.config.repo,
@@ -243,6 +266,7 @@ async def review_followup(
reviewed_at=datetime.now().isoformat(),
# Follow-up specific fields
reviewed_commit_sha=context.current_commit_sha,
+ reviewed_file_blobs=file_blobs,
is_followup_review=True,
previous_review_id=context.previous_review.review_id,
resolved_findings=[f.id for f in resolved],
diff --git a/apps/backend/runners/github/services/parallel_followup_reviewer.py b/apps/backend/runners/github/services/parallel_followup_reviewer.py
index fb7a04365b..bbc23a1c8c 100644
--- a/apps/backend/runners/github/services/parallel_followup_reviewer.py
+++ b/apps/backend/runners/github/services/parallel_followup_reviewer.py
@@ -32,7 +32,11 @@
try:
from ...core.client import create_client
from ...phase_config import get_thinking_budget
+ from ..context_gatherer import _validate_git_ref
+ from ..gh_client import GHClient
from ..models import (
+ BRANCH_BEHIND_BLOCKER_MSG,
+ BRANCH_BEHIND_REASONING,
GitHubRunnerConfig,
MergeVerdict,
PRReviewFinding,
@@ -40,11 +44,16 @@
ReviewSeverity,
)
from .category_utils import map_category
+ from .pr_worktree_manager import PRWorktreeManager
from .pydantic_models import ParallelFollowupResponse
from .sdk_utils import process_sdk_stream
except (ImportError, ValueError, SystemError):
+ from context_gatherer import _validate_git_ref
from core.client import create_client
+ from gh_client import GHClient
from models import (
+ BRANCH_BEHIND_BLOCKER_MSG,
+ BRANCH_BEHIND_REASONING,
GitHubRunnerConfig,
MergeVerdict,
PRReviewFinding,
@@ -53,6 +62,7 @@
)
from phase_config import get_thinking_budget
from services.category_utils import map_category
+ from services.pr_worktree_manager import PRWorktreeManager
from services.pydantic_models import ParallelFollowupResponse
from services.sdk_utils import process_sdk_stream
@@ -62,6 +72,9 @@
# Check if debug mode is enabled
DEBUG_MODE = os.environ.get("DEBUG", "").lower() in ("true", "1", "yes")
+# Directory for PR review worktrees (shared with initial reviewer)
+PR_WORKTREE_DIR = ".auto-claude/github/pr/worktrees"
+
# Severity mapping for AI responses
_SEVERITY_MAPPING = {
"critical": ReviewSeverity.CRITICAL,
@@ -106,6 +119,7 @@ def __init__(
self.github_dir = Path(github_dir)
self.config = config
self.progress_callback = progress_callback
+ self.worktree_manager = PRWorktreeManager(project_dir, PR_WORKTREE_DIR)
def _report_progress(self, phase: str, progress: int, message: str, **kwargs):
"""Report progress if callback is set."""
@@ -136,6 +150,37 @@ def _load_prompt(self, filename: str) -> str:
logger.warning(f"Prompt file not found: {prompt_file}")
return ""
+ def _create_pr_worktree(self, head_sha: str, pr_number: int) -> Path:
+ """Create a temporary worktree at the PR head commit.
+
+ Args:
+ head_sha: The commit SHA of the PR head (validated before use)
+ pr_number: The PR number for naming
+
+ Returns:
+ Path to the created worktree
+
+ Raises:
+ RuntimeError: If worktree creation fails
+ ValueError: If head_sha fails validation (command injection prevention)
+ """
+ # SECURITY: Validate git ref before use in subprocess calls
+ if not _validate_git_ref(head_sha):
+ raise ValueError(
+ f"Invalid git ref: '{head_sha}'. "
+ "Must contain only alphanumeric characters, dots, slashes, underscores, and hyphens."
+ )
+
+ return self.worktree_manager.create_worktree(head_sha, pr_number)
+
+ def _cleanup_pr_worktree(self, worktree_path: Path) -> None:
+ """Remove a temporary PR review worktree with fallback chain.
+
+ Args:
+ worktree_path: Path to the worktree to remove
+ """
+ self.worktree_manager.remove_worktree(worktree_path)
+
def _define_specialist_agents(self) -> dict[str, AgentDefinition]:
"""
Define specialist agents for follow-up review.
@@ -265,6 +310,44 @@ def _format_ai_reviews(self, context: FollowupReviewContext) -> str:
return "\n\n---\n\n".join(ai_content)
+ def _format_ci_status(self, context: FollowupReviewContext) -> str:
+ """Format CI status for the prompt."""
+ ci_status = context.ci_status
+ if not ci_status:
+ return "CI status not available."
+
+ passing = ci_status.get("passing", 0)
+ failing = ci_status.get("failing", 0)
+ pending = ci_status.get("pending", 0)
+ failed_checks = ci_status.get("failed_checks", [])
+ awaiting_approval = ci_status.get("awaiting_approval", 0)
+
+ lines = []
+
+ # Overall status
+ if failing > 0:
+ lines.append(f"⚠️ **{failing} CI check(s) FAILING** - PR cannot be merged")
+ elif pending > 0:
+ lines.append(f"⏳ **{pending} CI check(s) pending** - Wait for completion")
+ elif passing > 0:
+ lines.append(f"✅ **All {passing} CI check(s) passing**")
+ else:
+ lines.append("No CI checks configured")
+
+ # List failed checks
+ if failed_checks:
+ lines.append("\n**Failed checks:**")
+ for check in failed_checks:
+ lines.append(f" - ❌ {check}")
+
+ # Awaiting approval (fork PRs)
+ if awaiting_approval > 0:
+ lines.append(
+ f"\n⏸️ **{awaiting_approval} workflow(s) awaiting maintainer approval** (fork PR)"
+ )
+
+ return "\n".join(lines)
+
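For a context whose ci_status reports one failing check and one fork workflow awaiting approval, the block produced above would read roughly as follows (illustrative check name):

⚠️ **1 CI check(s) FAILING** - PR cannot be merged

**Failed checks:**
  - ❌ lint

⏸️ **1 workflow(s) awaiting maintainer approval** (fork PR)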
def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
"""Build full prompt for orchestrator with follow-up context."""
# Load orchestrator prompt
@@ -277,6 +360,7 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
commits = self._format_commits(context)
contributor_comments = self._format_comments(context)
ai_reviews = self._format_ai_reviews(context)
+ ci_status = self._format_ci_status(context)
# Truncate diff if too long
MAX_DIFF_CHARS = 100_000
@@ -295,6 +379,9 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
**New Commits:** {len(context.commits_since_review)}
**Files Changed:** {len(context.files_changed_since_review)}
+### CI Status (CRITICAL - Must Factor Into Verdict)
+{ci_status}
+
### Previous Review Summary
{context.previous_review.summary[:500] if context.previous_review.summary else "No summary available."}
@@ -323,6 +410,7 @@ def _build_orchestrator_prompt(self, context: FollowupReviewContext) -> str:
Now analyze this follow-up and delegate to the appropriate specialist agents.
Remember: YOU decide which agents to invoke based on YOUR analysis.
The SDK will run invoked agents in parallel automatically.
+**CRITICAL: Your verdict MUST account for CI status. Failing CI = BLOCKED verdict.**
"""
return base_prompt + followup_context
@@ -341,6 +429,9 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
f"[ParallelFollowup] Starting follow-up review for PR #{context.pr_number}"
)
+ # Track worktree for cleanup
+ worktree_path: Path | None = None
+
try:
self._report_progress(
"orchestrating",
@@ -352,13 +443,48 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
# Build orchestrator prompt
prompt = self._build_orchestrator_prompt(context)
- # Get project root
+ # Get project root - default to local checkout
project_root = (
self.project_dir.parent.parent
if self.project_dir.name == "backend"
else self.project_dir
)
+ # Create temporary worktree at PR head commit for isolated review
+ # This ensures agents read from the correct PR state, not the current checkout
+ head_sha = context.current_commit_sha
+ if head_sha and _validate_git_ref(head_sha):
+ try:
+ if DEBUG_MODE:
+ print(
+ f"[Followup] DEBUG: Creating worktree for head_sha={head_sha}",
+ flush=True,
+ )
+ worktree_path = self._create_pr_worktree(
+ head_sha, context.pr_number
+ )
+ project_root = worktree_path
+ print(
+ f"[Followup] Using worktree at {worktree_path.name} for PR review",
+ flush=True,
+ )
+ except Exception as e:
+ if DEBUG_MODE:
+ print(
+ f"[Followup] DEBUG: Worktree creation FAILED: {e}",
+ flush=True,
+ )
+ logger.warning(
+ f"[ParallelFollowup] Worktree creation failed, "
+ f"falling back to local checkout: {e}"
+ )
+ # Fallback to original behavior if worktree creation fails
+ else:
+ logger.warning(
+ f"[ParallelFollowup] Invalid or missing head_sha '{head_sha}', "
+ "using local checkout"
+ )
+
# Use model and thinking level from config (user settings)
model = self.config.model or "claude-sonnet-4-5-20250929"
thinking_level = self.config.thinking_level or "medium"
@@ -459,15 +585,60 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
f"{len(resolved_ids)} resolved, {len(unresolved_ids)} unresolved"
)
+ # Generate blockers from critical/high/medium severity findings
+ # (Medium also blocks merge in our strict quality gates approach)
+ blockers = []
+
+ # CRITICAL: Merge conflicts block merging - check FIRST before summary generation
+ # This must happen before _generate_summary so the summary reflects merge conflict status
+ if context.has_merge_conflicts:
+ blockers.append(
+ "Merge Conflicts: PR has conflicts with base branch that must be resolved"
+ )
+ # Override verdict to BLOCKED if merge conflicts exist
+ verdict = MergeVerdict.BLOCKED
+ verdict_reasoning = (
+ "Blocked: PR has merge conflicts with base branch. "
+ "Resolve conflicts before merge."
+ )
+ print(
+ "[ParallelFollowup] ⚠️ PR has merge conflicts - blocking merge",
+ flush=True,
+ )
+ # Check if branch is behind base (out of date) - warning, not hard blocker
+ elif context.merge_state_status == "BEHIND":
+ blockers.append(BRANCH_BEHIND_BLOCKER_MSG)
+ # Use NEEDS_REVISION since potential conflicts are unknown until branch is updated
+ # Must handle both READY_TO_MERGE and MERGE_WITH_CHANGES verdicts
+ if verdict in (
+ MergeVerdict.READY_TO_MERGE,
+ MergeVerdict.MERGE_WITH_CHANGES,
+ ):
+ verdict = MergeVerdict.NEEDS_REVISION
+ verdict_reasoning = BRANCH_BEHIND_REASONING
+ print(
+ "[ParallelFollowup] ⚠️ PR branch is behind base - needs update",
+ flush=True,
+ )
+
+ for finding in unique_findings:
+ if finding.severity in (
+ ReviewSeverity.CRITICAL,
+ ReviewSeverity.HIGH,
+ ReviewSeverity.MEDIUM,
+ ):
+ blockers.append(f"{finding.category.value}: {finding.title}")
+
# Extract validation counts
dismissed_count = len(result_data.get("dismissed_false_positive_ids", []))
confirmed_count = result_data.get("confirmed_valid_count", 0)
needs_human_count = result_data.get("needs_human_review_count", 0)
- # Generate summary
+ # Generate summary (AFTER merge conflict check so it reflects correct verdict)
summary = self._generate_summary(
verdict=verdict,
verdict_reasoning=verdict_reasoning,
+ blockers=blockers,
resolved_count=len(resolved_ids),
unresolved_count=len(unresolved_ids),
new_count=len(new_finding_ids),
@@ -475,6 +646,7 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
dismissed_false_positive_count=dismissed_count,
confirmed_valid_count=confirmed_count,
needs_human_review_count=needs_human_count,
+ ci_status=context.ci_status,
)
# Map verdict to overall_status
@@ -487,16 +659,26 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
else:
overall_status = "approve"
- # Generate blockers from critical/high/medium severity findings
- # (Medium also blocks merge in our strict quality gates approach)
- blockers = []
- for finding in unique_findings:
- if finding.severity in (
- ReviewSeverity.CRITICAL,
- ReviewSeverity.HIGH,
- ReviewSeverity.MEDIUM,
- ):
- blockers.append(f"{finding.category.value}: {finding.title}")
+ # Get file blob SHAs for rebase-resistant follow-up reviews
+ # Blob SHAs persist across rebases - same content = same blob SHA
+ file_blobs: dict[str, str] = {}
+ try:
+ gh_client = GHClient(
+ project_dir=self.project_dir,
+ default_timeout=30.0,
+ repo=self.config.repo,
+ )
+ pr_files = await gh_client.get_pr_files(context.pr_number)
+ for file in pr_files:
+ filename = file.get("filename", "")
+ blob_sha = file.get("sha", "")
+ if filename and blob_sha:
+ file_blobs[filename] = blob_sha
+ logger.info(
+ f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking"
+ )
+ except Exception as e:
+ logger.warning(f"Could not capture file blobs: {e}")
result = PRReviewResult(
pr_number=context.pr_number,
@@ -509,6 +691,7 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
verdict_reasoning=verdict_reasoning,
blockers=blockers,
reviewed_commit_sha=context.current_commit_sha,
+ reviewed_file_blobs=file_blobs,
is_followup_review=True,
previous_review_id=context.previous_review.review_id
or context.previous_review.pr_number,
@@ -543,6 +726,10 @@ async def review(self, context: FollowupReviewContext) -> PRReviewResult:
is_followup_review=True,
reviewed_commit_sha=context.current_commit_sha,
)
+ finally:
+ # Always cleanup worktree, even on error
+ if worktree_path:
+ self._cleanup_pr_worktree(worktree_path)
def _parse_structured_output(
self, data: dict, context: FollowupReviewContext
@@ -614,13 +801,11 @@ def _parse_structured_output(
validation = validation_map.get(rv.finding_id)
validation_status = None
validation_evidence = None
- validation_confidence = None
validation_explanation = None
if validation:
validation_status = validation.validation_status
validation_evidence = validation.code_evidence
- validation_confidence = validation.confidence
validation_explanation = validation.explanation
findings.append(
@@ -636,7 +821,6 @@ def _parse_structured_output(
fixable=original.fixable,
validation_status=validation_status,
validation_evidence=validation_evidence,
- validation_confidence=validation_confidence,
validation_explanation=validation_explanation,
)
)
@@ -805,6 +989,7 @@ def _generate_summary(
self,
verdict: MergeVerdict,
verdict_reasoning: str,
+ blockers: list[str],
resolved_count: int,
unresolved_count: int,
new_count: int,
@@ -812,13 +997,15 @@ def _generate_summary(
dismissed_false_positive_count: int = 0,
confirmed_valid_count: int = 0,
needs_human_review_count: int = 0,
+ ci_status: dict | None = None,
) -> str:
"""Generate a human-readable summary of the follow-up review."""
+ # Use same emojis as orchestrator.py for consistency
status_emoji = {
MergeVerdict.READY_TO_MERGE: "✅",
- MergeVerdict.MERGE_WITH_CHANGES: "⚠️",
- MergeVerdict.NEEDS_REVISION: "🔄",
- MergeVerdict.BLOCKED: "🚫",
+ MergeVerdict.MERGE_WITH_CHANGES: "🟡",
+ MergeVerdict.NEEDS_REVISION: "🟠",
+ MergeVerdict.BLOCKED: "🔴",
}
emoji = status_emoji.get(verdict, "📝")
@@ -826,6 +1013,15 @@ def _generate_summary(
", ".join(agents_invoked) if agents_invoked else "orchestrator only"
)
+ # Generate a prominent bottom-line summary for quick scanning
+ bottom_line = self._generate_bottom_line(
+ verdict=verdict,
+ ci_status=ci_status,
+ unresolved_count=unresolved_count,
+ new_count=new_count,
+ blockers=blockers,
+ )
+
# Build validation section if there are validation results
validation_section = ""
if (
@@ -838,15 +1034,26 @@ def _generate_summary(
- 🔍 **Dismissed as False Positives**: {dismissed_false_positive_count} findings were re-investigated and found to be incorrect
- ✓ **Confirmed Valid**: {confirmed_valid_count} findings verified as genuine issues
- 👤 **Needs Human Review**: {needs_human_review_count} findings require manual verification
+"""
+
+ # Build blockers section if there are any blockers
+ blockers_section = ""
+ if blockers:
+ blockers_list = "\n".join(f"- {b}" for b in blockers)
+ blockers_section = f"""
+### 🚨 Blocking Issues
+{blockers_list}
"""
summary = f"""## {emoji} Follow-up Review: {verdict.value.replace("_", " ").title()}
+> {bottom_line}
+
### Resolution Status
- ✅ **Resolved**: {resolved_count} previous findings addressed
- ❌ **Unresolved**: {unresolved_count} previous findings remain
- 🆕 **New Issues**: {new_count} new findings in recent changes
-{validation_section}
+{validation_section}{blockers_section}
### Verdict
{verdict_reasoning}
@@ -857,3 +1064,65 @@ def _generate_summary(
*This is an AI-generated follow-up review using parallel specialist analysis with finding validation.*
"""
return summary
+
+ def _generate_bottom_line(
+ self,
+ verdict: MergeVerdict,
+ ci_status: dict | None,
+ unresolved_count: int,
+ new_count: int,
+ blockers: list[str],
+ ) -> str:
+ """Generate a one-line summary for quick scanning at the top of the review."""
+ # Check CI status
+ ci = ci_status or {}
+ pending_ci = ci.get("pending", 0)
+ failing_ci = ci.get("failing", 0)
+ awaiting_approval = ci.get("awaiting_approval", 0)
+
+ # Count blocking issues (excluding CI-related ones)
+ code_blockers = [
+ b for b in blockers if "CI" not in b and "Merge Conflict" not in b
+ ]
+ has_merge_conflicts = any("Merge Conflict" in b for b in blockers)
+
+ # Determine the bottom line based on verdict and context
+ if verdict == MergeVerdict.READY_TO_MERGE:
+ return "**✅ Ready to merge** - All checks passing and findings addressed."
+
+ elif verdict == MergeVerdict.BLOCKED:
+ if has_merge_conflicts:
+ return "**🔴 Blocked** - Merge conflicts must be resolved before merge."
+ elif failing_ci > 0:
+ return f"**🔴 Blocked** - {failing_ci} CI check(s) failing. Fix CI before merge."
+ elif awaiting_approval > 0:
+                return "**🔴 Blocked** - Awaiting maintainer approval for fork PR workflow(s)."
+ elif code_blockers:
+ return f"**🔴 Blocked** - {len(code_blockers)} blocking issue(s) require fixes."
+ else:
+ return "**🔴 Blocked** - Critical issues must be resolved before merge."
+
+ elif verdict == MergeVerdict.NEEDS_REVISION:
+ # Key insight: distinguish "waiting on CI" from "needs code fixes"
+ # Check code issues FIRST before checking pending CI
+ if unresolved_count > 0:
+ return f"**🟠 Needs revision** - {unresolved_count} unresolved finding(s) from previous review."
+ elif code_blockers:
+ return f"**🟠 Needs revision** - {len(code_blockers)} blocking issue(s) require fixes."
+ elif new_count > 0:
+ return f"**🟠 Needs revision** - {new_count} new issue(s) found in recent changes."
+ elif pending_ci > 0:
+ # Only show "Ready once CI passes" when no code issues exist
+ return f"**⏳ Ready once CI passes** - {pending_ci} check(s) pending, all findings addressed."
+ else:
+ return "**🟠 Needs revision** - See details below."
+
+ elif verdict == MergeVerdict.MERGE_WITH_CHANGES:
+ if pending_ci > 0:
+ return (
+ "**🟡 Can merge once CI passes** - Minor suggestions, no blockers."
+ )
+ else:
+ return "**🟡 Can merge** - Minor suggestions noted, no blockers."
+
+ return "**📝 Review complete** - See details below."
diff --git a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
index 7b7fe00c54..254f5087fd 100644
--- a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
+++ b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
@@ -20,9 +20,6 @@
import hashlib
import logging
import os
-import shutil
-import subprocess
-import uuid
from pathlib import Path
from typing import Any
@@ -32,7 +29,10 @@
from ...core.client import create_client
from ...phase_config import get_thinking_budget
from ..context_gatherer import PRContext, _validate_git_ref
+ from ..gh_client import GHClient
from ..models import (
+ BRANCH_BEHIND_BLOCKER_MSG,
+ BRANCH_BEHIND_REASONING,
GitHubRunnerConfig,
MergeVerdict,
PRReviewFinding,
@@ -40,12 +40,16 @@
ReviewSeverity,
)
from .category_utils import map_category
+ from .pr_worktree_manager import PRWorktreeManager
from .pydantic_models import ParallelOrchestratorResponse
from .sdk_utils import process_sdk_stream
except (ImportError, ValueError, SystemError):
from context_gatherer import PRContext, _validate_git_ref
from core.client import create_client
+ from gh_client import GHClient
from models import (
+ BRANCH_BEHIND_BLOCKER_MSG,
+ BRANCH_BEHIND_REASONING,
GitHubRunnerConfig,
MergeVerdict,
PRReviewFinding,
@@ -54,6 +58,7 @@
)
from phase_config import get_thinking_budget
from services.category_utils import map_category
+ from services.pr_worktree_manager import PRWorktreeManager
from services.pydantic_models import ParallelOrchestratorResponse
from services.sdk_utils import process_sdk_stream
@@ -92,6 +97,7 @@ def __init__(
self.github_dir = Path(github_dir)
self.config = config
self.progress_callback = progress_callback
+ self.worktree_manager = PRWorktreeManager(project_dir, PR_WORKTREE_DIR)
def _report_progress(self, phase: str, progress: int, message: str, **kwargs):
"""Report progress if callback is set."""
@@ -143,78 +149,7 @@ def _create_pr_worktree(self, head_sha: str, pr_number: int) -> Path:
"Must contain only alphanumeric characters, dots, slashes, underscores, and hyphens."
)
- worktree_name = f"pr-{pr_number}-{uuid.uuid4().hex[:8]}"
- worktree_dir = self.project_dir / PR_WORKTREE_DIR
-
- if DEBUG_MODE:
- print(f"[PRReview] DEBUG: project_dir={self.project_dir}", flush=True)
- print(f"[PRReview] DEBUG: worktree_dir={worktree_dir}", flush=True)
- print(f"[PRReview] DEBUG: head_sha={head_sha}", flush=True)
-
- worktree_dir.mkdir(parents=True, exist_ok=True)
- worktree_path = worktree_dir / worktree_name
-
- if DEBUG_MODE:
- print(f"[PRReview] DEBUG: worktree_path={worktree_path}", flush=True)
- print(
- f"[PRReview] DEBUG: worktree_dir exists={worktree_dir.exists()}",
- flush=True,
- )
-
- # Fetch the commit if not available locally (handles fork PRs)
- fetch_result = subprocess.run(
- ["git", "fetch", "origin", head_sha],
- cwd=self.project_dir,
- capture_output=True,
- text=True,
- timeout=60,
- )
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: fetch returncode={fetch_result.returncode}",
- flush=True,
- )
- if fetch_result.stderr:
- print(
- f"[PRReview] DEBUG: fetch stderr={fetch_result.stderr[:200]}",
- flush=True,
- )
-
- # Create detached worktree at the PR commit
- result = subprocess.run(
- ["git", "worktree", "add", "--detach", str(worktree_path), head_sha],
- cwd=self.project_dir,
- capture_output=True,
- text=True,
- timeout=120, # Worktree add can be slow for large repos
- )
-
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: worktree add returncode={result.returncode}",
- flush=True,
- )
- if result.stderr:
- print(
- f"[PRReview] DEBUG: worktree add stderr={result.stderr[:200]}",
- flush=True,
- )
- if result.stdout:
- print(
- f"[PRReview] DEBUG: worktree add stdout={result.stdout[:200]}",
- flush=True,
- )
-
- if result.returncode != 0:
- raise RuntimeError(f"Failed to create worktree: {result.stderr}")
-
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: worktree created, exists={worktree_path.exists()}",
- flush=True,
- )
- logger.info(f"[PRReview] Created worktree at {worktree_path}")
- return worktree_path
+ return self.worktree_manager.create_worktree(head_sha, pr_number)
def _cleanup_pr_worktree(self, worktree_path: Path) -> None:
"""Remove a temporary PR review worktree with fallback chain.
@@ -222,100 +157,16 @@ def _cleanup_pr_worktree(self, worktree_path: Path) -> None:
Args:
worktree_path: Path to the worktree to remove
"""
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: _cleanup_pr_worktree called with {worktree_path}",
- flush=True,
- )
-
- if not worktree_path or not worktree_path.exists():
- if DEBUG_MODE:
- print(
- "[PRReview] DEBUG: worktree path doesn't exist, skipping cleanup",
- flush=True,
- )
- return
-
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: Attempting to remove worktree at {worktree_path}",
- flush=True,
- )
-
- # Try 1: git worktree remove
- result = subprocess.run(
- ["git", "worktree", "remove", "--force", str(worktree_path)],
- cwd=self.project_dir,
- capture_output=True,
- text=True,
- timeout=30,
- )
-
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: worktree remove returncode={result.returncode}",
- flush=True,
- )
-
- if result.returncode == 0:
- logger.info(f"[PRReview] Cleaned up worktree: {worktree_path.name}")
- return
-
- # Try 2: shutil.rmtree fallback
- try:
- shutil.rmtree(worktree_path, ignore_errors=True)
- subprocess.run(
- ["git", "worktree", "prune"],
- cwd=self.project_dir,
- capture_output=True,
- timeout=30,
- )
- logger.warning(f"[PRReview] Used shutil fallback for: {worktree_path.name}")
- except Exception as e:
- logger.error(f"[PRReview] Failed to cleanup worktree {worktree_path}: {e}")
+ self.worktree_manager.remove_worktree(worktree_path)
def _cleanup_stale_pr_worktrees(self) -> None:
- """Clean up orphaned PR review worktrees on startup."""
- worktree_dir = self.project_dir / PR_WORKTREE_DIR
- if not worktree_dir.exists():
- return
-
- # Get registered worktrees from git
- result = subprocess.run(
- ["git", "worktree", "list", "--porcelain"],
- cwd=self.project_dir,
- capture_output=True,
- text=True,
- timeout=30,
- )
- registered = set()
- for line in result.stdout.split("\n"):
- if line.startswith("worktree "):
- # Safely parse - check bounds to prevent IndexError
- parts = line.split(" ", 1)
- if len(parts) > 1 and parts[1]:
- registered.add(Path(parts[1]))
-
- # Remove unregistered directories
- stale_count = 0
- for item in worktree_dir.iterdir():
- if item.is_dir() and item not in registered:
- logger.info(f"[PRReview] Removing stale worktree: {item.name}")
- shutil.rmtree(item, ignore_errors=True)
- stale_count += 1
-
- if stale_count > 0:
- subprocess.run(
- ["git", "worktree", "prune"],
- cwd=self.project_dir,
- capture_output=True,
- timeout=30,
+ """Clean up orphaned, expired, and excess PR review worktrees on startup."""
+ stats = self.worktree_manager.cleanup_worktrees()
+ if stats["total"] > 0:
+ logger.info(
+ f"[PRReview] Cleanup: removed {stats['total']} worktrees "
+ f"(orphaned={stats['orphaned']}, expired={stats['expired']}, excess={stats['excess']})"
)
- if DEBUG_MODE:
- print(
- f"[PRReview] DEBUG: Cleaned up {stale_count} stale worktree(s)",
- flush=True,
- )
def _define_specialist_agents(self) -> dict[str, AgentDefinition]:
"""
@@ -584,7 +435,7 @@ def _create_finding_from_structured(self, finding_data: Any) -> PRReviewFinding:
category=category,
severity=severity,
suggested_fix=finding_data.suggested_fix or "",
- confidence=self._normalize_confidence(finding_data.confidence),
+ evidence=finding_data.evidence,
)
async def review(self, context: PRContext) -> PRReviewResult:
@@ -769,9 +620,11 @@ async def review(self, context: PRContext) -> PRReviewResult:
f"[ParallelOrchestrator] Review complete: {len(unique_findings)} findings"
)
- # Generate verdict
+ # Generate verdict (includes merge conflict check and branch-behind check)
verdict, verdict_reasoning, blockers = self._generate_verdict(
- unique_findings
+ unique_findings,
+ has_merge_conflicts=context.has_merge_conflicts,
+ merge_state_status=context.merge_state_status,
)
# Generate summary
@@ -799,6 +652,27 @@ async def review(self, context: PRContext) -> PRReviewResult:
latest_commit = context.commits[-1]
head_sha = latest_commit.get("oid") or latest_commit.get("sha")
+ # Get file blob SHAs for rebase-resistant follow-up reviews
+ # Blob SHAs persist across rebases - same content = same blob SHA
+ file_blobs: dict[str, str] = {}
+ try:
+ gh_client = GHClient(
+ project_dir=self.project_dir,
+ default_timeout=30.0,
+ repo=self.config.repo,
+ )
+ pr_files = await gh_client.get_pr_files(context.pr_number)
+ for file in pr_files:
+ filename = file.get("filename", "")
+ blob_sha = file.get("sha", "")
+ if filename and blob_sha:
+ file_blobs[filename] = blob_sha
+ logger.info(
+ f"Captured {len(file_blobs)} file blob SHAs for follow-up tracking"
+ )
+ except Exception as e:
+ logger.warning(f"Could not capture file blobs: {e}")
+
result = PRReviewResult(
pr_number=context.pr_number,
repo=self.config.repo,
@@ -810,6 +684,7 @@ async def review(self, context: PRContext) -> PRReviewResult:
verdict_reasoning=verdict_reasoning,
blockers=blockers,
reviewed_commit_sha=head_sha,
+ reviewed_file_blobs=file_blobs,
)
self._report_progress(
@@ -945,7 +820,7 @@ def _create_finding_from_dict(self, f_data: dict[str, Any]) -> PRReviewFinding:
category=category,
severity=severity,
suggested_fix=f_data.get("suggested_fix", ""),
- confidence=self._normalize_confidence(f_data.get("confidence", 85)),
+ evidence=f_data.get("evidence"),
)
def _parse_text_output(self, output: str) -> list[PRReviewFinding]:
@@ -993,10 +868,23 @@ def _deduplicate_findings(
return unique
def _generate_verdict(
- self, findings: list[PRReviewFinding]
+ self,
+ findings: list[PRReviewFinding],
+ has_merge_conflicts: bool = False,
+ merge_state_status: str = "",
) -> tuple[MergeVerdict, str, list[str]]:
- """Generate merge verdict based on findings."""
+ """Generate merge verdict based on findings, merge conflict status, and branch state."""
blockers = []
+ is_branch_behind = merge_state_status == "BEHIND"
+
+ # CRITICAL: Merge conflicts block merging - check first
+ if has_merge_conflicts:
+ blockers.append(
+ "Merge Conflicts: PR has conflicts with base branch that must be resolved"
+ )
+ # Branch behind base is a warning, not a hard blocker
+ elif is_branch_behind:
+ blockers.append(BRANCH_BEHIND_BLOCKER_MSG)
critical = [f for f in findings if f.severity == ReviewSeverity.CRITICAL]
high = [f for f in findings if f.severity == ReviewSeverity.HIGH]
@@ -1007,8 +895,25 @@ def _generate_verdict(
blockers.append(f"Critical: {f.title} ({f.file}:{f.line})")
if blockers:
- verdict = MergeVerdict.BLOCKED
- reasoning = f"Blocked by {len(blockers)} critical issue(s)"
+ # Merge conflicts are the highest priority blocker
+ if has_merge_conflicts:
+ verdict = MergeVerdict.BLOCKED
+ reasoning = (
+ "Blocked: PR has merge conflicts with base branch. "
+ "Resolve conflicts before merge."
+ )
+ elif critical:
+ verdict = MergeVerdict.BLOCKED
+ reasoning = f"Blocked by {len(critical)} critical issue(s)"
+ # Branch behind is a soft blocker - NEEDS_REVISION, not BLOCKED
+ elif is_branch_behind:
+ verdict = MergeVerdict.NEEDS_REVISION
+ reasoning = BRANCH_BEHIND_REASONING
+ if low:
+ reasoning += f" {len(low)} non-blocking suggestion(s) to consider."
+ else:
+ verdict = MergeVerdict.BLOCKED
+ reasoning = f"Blocked by {len(blockers)} issue(s)"
elif high or medium:
# High and Medium severity findings block merge
verdict = MergeVerdict.NEEDS_REVISION
diff --git a/apps/backend/runners/github/services/pr_review_engine.py b/apps/backend/runners/github/services/pr_review_engine.py
index 24d1fb69f0..d8832539e7 100644
--- a/apps/backend/runners/github/services/pr_review_engine.py
+++ b/apps/backend/runners/github/services/pr_review_engine.py
@@ -242,7 +242,9 @@ async def run_review_pass(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
if review_pass == ReviewPass.QUICK_SCAN:
@@ -502,7 +504,9 @@ async def _run_structural_pass(self, context: PRContext) -> str:
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
except Exception as e:
print(f"[AI] Structural pass error: {e}", flush=True)
@@ -558,7 +562,9 @@ async def _run_ai_triage_pass(self, context: PRContext) -> str:
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
except Exception as e:
print(f"[AI] AI triage pass error: {e}", flush=True)
diff --git a/apps/backend/runners/github/services/pr_worktree_manager.py b/apps/backend/runners/github/services/pr_worktree_manager.py
new file mode 100644
index 0000000000..0518dc4929
--- /dev/null
+++ b/apps/backend/runners/github/services/pr_worktree_manager.py
@@ -0,0 +1,437 @@
+"""
+PR Worktree Manager
+===================
+
+Manages lifecycle of PR review worktrees with cleanup policies.
+
+Features:
+- Age-based cleanup (remove worktrees older than N days)
+- Count-based cleanup (keep only N most recent worktrees)
+- Orphaned worktree cleanup (worktrees not registered with git)
+- Automatic cleanup on review completion
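+
+Usage sketch (illustrative values only, not a prescribed call sequence):
+
+    manager = PRWorktreeManager(project_dir=Path("/repo"), worktree_dir=".pr-review-worktrees")
+    worktree = manager.create_worktree(head_sha="abc123def4567890", pr_number=42)
+    # ... run the review inside `worktree`, then:
+    manager.remove_worktree(worktree)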
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+import re
+import shutil
+import subprocess
+import time
+from pathlib import Path
+from typing import NamedTuple
+
+logger = logging.getLogger(__name__)
+
+# Default cleanup policies (can be overridden via environment variables)
+DEFAULT_MAX_PR_WORKTREES = 10 # Max worktrees to keep
+DEFAULT_PR_WORKTREE_MAX_AGE_DAYS = 7 # Max age in days
+
+
+def _get_max_pr_worktrees() -> int:
+ """Get max worktrees setting, read at runtime for testability."""
+ try:
+ value = int(os.environ.get("MAX_PR_WORKTREES", str(DEFAULT_MAX_PR_WORKTREES)))
+ return value if value > 0 else DEFAULT_MAX_PR_WORKTREES
+ except (ValueError, TypeError):
+ return DEFAULT_MAX_PR_WORKTREES
+
+
+def _get_max_age_days() -> int:
+ """Get max age setting, read at runtime for testability."""
+ try:
+ value = int(
+ os.environ.get(
+ "PR_WORKTREE_MAX_AGE_DAYS", str(DEFAULT_PR_WORKTREE_MAX_AGE_DAYS)
+ )
+ )
+ return value if value >= 0 else DEFAULT_PR_WORKTREE_MAX_AGE_DAYS
+ except (ValueError, TypeError):
+ return DEFAULT_PR_WORKTREE_MAX_AGE_DAYS
+
+
+# Safe pattern for git refs (SHA, branch names)
+# Allows: alphanumeric, dots, underscores, hyphens, forward slashes
+SAFE_REF_PATTERN = re.compile(r"^[a-zA-Z0-9._/\-]+$")
+
+
+class WorktreeInfo(NamedTuple):
+ """Information about a PR worktree."""
+
+ path: Path
+ age_days: float
+ pr_number: int | None = None
+
+
+class PRWorktreeManager:
+ """
+ Manages PR review worktrees with automatic cleanup policies.
+
+ Cleanup policies:
+ 1. Remove worktrees older than PR_WORKTREE_MAX_AGE_DAYS (default: 7 days)
+ 2. Keep only MAX_PR_WORKTREES most recent worktrees (default: 10)
+ 3. Remove orphaned worktrees (not registered with git)
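+
+    Both limits can be tuned per run via the MAX_PR_WORKTREES and
+    PR_WORKTREE_MAX_AGE_DAYS environment variables (illustrative:
+    MAX_PR_WORKTREES=5 keeps at most five worktrees).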
+ """
+
+ def __init__(self, project_dir: Path, worktree_dir: str | Path):
+ """
+ Initialize the worktree manager.
+
+ Args:
+ project_dir: Root directory of the git project
+ worktree_dir: Directory where PR worktrees are stored (relative to project_dir)
+ """
+ self.project_dir = Path(project_dir)
+ self.worktree_base_dir = self.project_dir / worktree_dir
+
+ def create_worktree(
+ self, head_sha: str, pr_number: int, auto_cleanup: bool = True
+ ) -> Path:
+ """
+ Create a PR worktree with automatic cleanup of old worktrees.
+
+ Args:
+ head_sha: Git commit SHA to checkout
+ pr_number: PR number for naming
+ auto_cleanup: If True (default), run cleanup before creating
+
+ Returns:
+ Path to the created worktree
+
+ Raises:
+ RuntimeError: If worktree creation fails
+ ValueError: If head_sha or pr_number are invalid
+ """
+ # Validate inputs to prevent command injection
+ if not head_sha or not SAFE_REF_PATTERN.match(head_sha):
+ raise ValueError(
+ f"Invalid head_sha: must match pattern {SAFE_REF_PATTERN.pattern}"
+ )
+ if not isinstance(pr_number, int) or pr_number <= 0:
+ raise ValueError(
+ f"Invalid pr_number: must be a positive integer, got {pr_number}"
+ )
+
+ # Run cleanup before creating new worktree (can be disabled for tests)
+ if auto_cleanup:
+ self.cleanup_worktrees()
+
+ # Generate worktree name with timestamp for uniqueness
+ sha_short = head_sha[:8]
+ timestamp = int(time.time() * 1000) # Millisecond precision
+ worktree_name = f"pr-{pr_number}-{sha_short}-{timestamp}"
+
+ # Create worktree directory
+ self.worktree_base_dir.mkdir(parents=True, exist_ok=True)
+ worktree_path = self.worktree_base_dir / worktree_name
+
+ logger.debug(f"Creating worktree: {worktree_path}")
+
+ try:
+ # Fetch the commit if not available locally (handles fork PRs)
+ fetch_result = subprocess.run(
+ ["git", "fetch", "origin", head_sha],
+ cwd=self.project_dir,
+ capture_output=True,
+ text=True,
+ timeout=60,
+ )
+
+ if fetch_result.returncode != 0:
+ logger.warning(
+ f"Could not fetch {head_sha} from origin (fork PR?): {fetch_result.stderr}"
+ )
+ except subprocess.TimeoutExpired:
+ logger.warning(
+ f"Timeout fetching {head_sha} from origin, continuing anyway"
+ )
+
+ try:
+ # Create detached worktree at the PR commit
+ result = subprocess.run(
+ ["git", "worktree", "add", "--detach", str(worktree_path), head_sha],
+ cwd=self.project_dir,
+ capture_output=True,
+ text=True,
+ timeout=120,
+ )
+
+ if result.returncode != 0:
+                # Use stderr for the error message (git writes its diagnostics to stderr)
+ stderr = result.stderr.strip()
+ # Clean up partial worktree on failure
+ if worktree_path.exists():
+ shutil.rmtree(worktree_path, ignore_errors=True)
+ raise RuntimeError(f"Failed to create worktree: {stderr}")
+
+ # Verify the worktree was actually created
+ if not worktree_path.exists():
+ raise RuntimeError(
+ f"Worktree creation reported success but path does not exist: {worktree_path}"
+ )
+
+ except subprocess.TimeoutExpired:
+ # Clean up partial worktree on timeout
+ if worktree_path.exists():
+ shutil.rmtree(worktree_path, ignore_errors=True)
+ raise RuntimeError(f"Timeout creating worktree for {head_sha}")
+
+ logger.info(f"[WorktreeManager] Created worktree at {worktree_path}")
+ return worktree_path
+
+ def remove_worktree(self, worktree_path: Path) -> None:
+ """
+ Remove a PR worktree with fallback chain.
+
+ Args:
+ worktree_path: Path to the worktree to remove
+ """
+ if not worktree_path or not worktree_path.exists():
+ return
+
+ logger.debug(f"Removing worktree: {worktree_path}")
+
+ # Try 1: git worktree remove
+ try:
+ result = subprocess.run(
+ ["git", "worktree", "remove", "--force", str(worktree_path)],
+ cwd=self.project_dir,
+ capture_output=True,
+ text=True,
+ timeout=60,
+ )
+
+ if result.returncode == 0:
+ logger.info(f"[WorktreeManager] Removed worktree: {worktree_path.name}")
+ return
+ except subprocess.TimeoutExpired:
+ logger.warning(
+ f"Timeout removing worktree {worktree_path.name}, falling back to shutil"
+ )
+
+ # Try 2: shutil.rmtree fallback
+ try:
+ shutil.rmtree(worktree_path, ignore_errors=True)
+ subprocess.run(
+ ["git", "worktree", "prune"],
+ cwd=self.project_dir,
+ capture_output=True,
+ timeout=30,
+ )
+ logger.warning(
+ f"[WorktreeManager] Used shutil fallback for: {worktree_path.name}"
+ )
+ except Exception as e:
+ logger.error(
+ f"[WorktreeManager] Failed to remove worktree {worktree_path}: {e}"
+ )
+
+ def get_worktree_info(self) -> list[WorktreeInfo]:
+ """
+ Get information about all PR worktrees.
+
+ Returns:
+ List of WorktreeInfo objects sorted by age (oldest first)
+ """
+ if not self.worktree_base_dir.exists():
+ return []
+
+ worktrees = []
+ current_time = time.time()
+
+ for item in self.worktree_base_dir.iterdir():
+ if not item.is_dir():
+ continue
+
+ # Get modification time
+ mtime = item.stat().st_mtime
+ age_seconds = current_time - mtime
+ age_days = age_seconds / 86400 # Convert seconds to days
+
+            # Extract PR number from directory name (format: pr-<number>-<sha>-<timestamp>)
+ pr_number = None
+ if item.name.startswith("pr-"):
+ parts = item.name.split("-")
+ if len(parts) >= 2:
+ try:
+ pr_number = int(parts[1])
+ except ValueError:
+ pass
+
+ worktrees.append(
+ WorktreeInfo(path=item, age_days=age_days, pr_number=pr_number)
+ )
+
+ # Sort by age (oldest first)
+ worktrees.sort(key=lambda x: x.age_days, reverse=True)
+
+ return worktrees
+
+ def get_registered_worktrees(self) -> set[Path]:
+ """
+ Get set of worktrees registered with git.
+
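+        Parses `git worktree list --porcelain` output, which looks like (illustrative):
+
+            worktree /repo/.pr-review-worktrees/pr-42-abc123de-1700000000000
+            HEAD abc123def4567890abc123def4567890abc123de
+            detached
+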
+ Returns:
+            Set of Path objects for registered worktrees, as reported by git (callers resolve them before comparing)
+ """
+ try:
+ result = subprocess.run(
+ ["git", "worktree", "list", "--porcelain"],
+ cwd=self.project_dir,
+ capture_output=True,
+ text=True,
+ timeout=30,
+ )
+ except subprocess.TimeoutExpired:
+ logger.warning("Timeout listing worktrees, returning empty set")
+ return set()
+
+ registered = set()
+ for line in result.stdout.split("\n"):
+ if line.startswith("worktree "):
+ parts = line.split(" ", 1)
+ if len(parts) > 1 and parts[1]:
+ registered.add(Path(parts[1]))
+
+ return registered
+
+ def cleanup_worktrees(self, force: bool = False) -> dict[str, int]:
+ """
+ Clean up PR worktrees based on age and count policies.
+
+ Cleanup order:
+ 1. Remove orphaned worktrees (not registered with git)
+ 2. Remove worktrees older than PR_WORKTREE_MAX_AGE_DAYS
+ 3. If still over MAX_PR_WORKTREES, remove oldest worktrees
+
+ Args:
+ force: If True, skip age check and only enforce count limit
+
+ Returns:
+ Dict with cleanup statistics: {
+ 'orphaned': count,
+ 'expired': count,
+ 'excess': count,
+ 'total': count
+ }
+ """
+ stats = {"orphaned": 0, "expired": 0, "excess": 0, "total": 0}
+
+ if not self.worktree_base_dir.exists():
+ return stats
+
+ # Get registered worktrees (resolved paths for consistent comparison)
+ registered = self.get_registered_worktrees()
+ registered_resolved = {p.resolve() for p in registered}
+
+ # Get all PR worktree info
+ worktrees = self.get_worktree_info()
+
+        # Phase 1: Remove orphaned worktrees.
+        # Guard: an empty registered set means `git worktree list` failed or timed
+        # out (the main working tree is always listed), so skip orphan removal
+        # rather than treating every worktree as orphaned.
+        if registered_resolved:
+            for wt in worktrees:
+                if wt.path.resolve() not in registered_resolved:
+                    logger.info(
+                        f"[WorktreeManager] Removing orphaned worktree: {wt.path.name} (age: {wt.age_days:.1f} days)"
+                    )
+                    shutil.rmtree(wt.path, ignore_errors=True)
+                    stats["orphaned"] += 1
+        else:
+            logger.warning(
+                "[WorktreeManager] Could not list registered worktrees; skipping orphan cleanup"
+            )
+
+ # Refresh worktree list after orphan cleanup
+ try:
+ subprocess.run(
+ ["git", "worktree", "prune"],
+ cwd=self.project_dir,
+ capture_output=True,
+ timeout=30,
+ )
+ except subprocess.TimeoutExpired:
+ logger.warning("Timeout pruning worktrees, continuing anyway")
+
+ # Refresh registered worktrees after prune (git's internal registry may have changed)
+ registered_resolved = {p.resolve() for p in self.get_registered_worktrees()}
+
+ # Get fresh worktree info for remaining worktrees (use resolved paths)
+ worktrees = [
+ wt
+ for wt in self.get_worktree_info()
+ if wt.path.resolve() in registered_resolved
+ ]
+
+ # Phase 2: Remove expired worktrees (older than max age)
+ max_age_days = _get_max_age_days()
+ if not force:
+ for wt in worktrees:
+ if wt.age_days > max_age_days:
+ logger.info(
+ f"[WorktreeManager] Removing expired worktree: {wt.path.name} (age: {wt.age_days:.1f} days, max: {max_age_days} days)"
+ )
+ self.remove_worktree(wt.path)
+ stats["expired"] += 1
+
+ # Refresh worktree list after expiration cleanup (use resolved paths)
+ registered_resolved = {p.resolve() for p in self.get_registered_worktrees()}
+ worktrees = [
+ wt
+ for wt in self.get_worktree_info()
+ if wt.path.resolve() in registered_resolved
+ ]
+
+ # Phase 3: Remove excess worktrees (keep only max_pr_worktrees most recent)
+ max_pr_worktrees = _get_max_pr_worktrees()
+ if len(worktrees) > max_pr_worktrees:
+ # worktrees are already sorted by age (oldest first)
+ excess_count = len(worktrees) - max_pr_worktrees
+ for wt in worktrees[:excess_count]:
+ logger.info(
+ f"[WorktreeManager] Removing excess worktree: {wt.path.name} (count: {len(worktrees)}, max: {max_pr_worktrees})"
+ )
+ self.remove_worktree(wt.path)
+ stats["excess"] += 1
+
+ stats["total"] = stats["orphaned"] + stats["expired"] + stats["excess"]
+
+ if stats["total"] > 0:
+ logger.info(
+ f"[WorktreeManager] Cleanup complete: {stats['total']} worktrees removed "
+ f"(orphaned={stats['orphaned']}, expired={stats['expired']}, excess={stats['excess']})"
+ )
+ else:
+ logger.debug(
+ f"No cleanup needed (current: {len(worktrees)}, max: {max_pr_worktrees})"
+ )
+
+ return stats
+
+ def cleanup_all_worktrees(self) -> int:
+ """
+ Remove ALL PR worktrees (for testing or emergency cleanup).
+
+ Returns:
+ Number of worktrees removed
+ """
+ if not self.worktree_base_dir.exists():
+ return 0
+
+ worktrees = self.get_worktree_info()
+ count = 0
+
+ for wt in worktrees:
+ logger.info(f"[WorktreeManager] Removing worktree: {wt.path.name}")
+ self.remove_worktree(wt.path)
+ count += 1
+
+ if count > 0:
+ try:
+ subprocess.run(
+ ["git", "worktree", "prune"],
+ cwd=self.project_dir,
+ capture_output=True,
+ timeout=30,
+ )
+ except subprocess.TimeoutExpired:
+ logger.warning("Timeout pruning worktrees after cleanup")
+ logger.info(f"[WorktreeManager] Removed all {count} PR worktrees")
+
+ return count
diff --git a/apps/backend/runners/github/services/pydantic_models.py b/apps/backend/runners/github/services/pydantic_models.py
index 3c91a219eb..6777e97690 100644
--- a/apps/backend/runners/github/services/pydantic_models.py
+++ b/apps/backend/runners/github/services/pydantic_models.py
@@ -26,7 +26,7 @@
from typing import Literal
-from pydantic import BaseModel, Field, field_validator
+from pydantic import BaseModel, Field
# =============================================================================
# Common Finding Types
@@ -46,6 +46,10 @@ class BaseFinding(BaseModel):
line: int = Field(0, description="Line number of the issue")
suggested_fix: str | None = Field(None, description="How to fix this issue")
fixable: bool = Field(False, description="Whether this can be auto-fixed")
+ evidence: str | None = Field(
+ None,
+ description="Actual code snippet proving the issue exists. Required for validation.",
+ )
class SecurityFinding(BaseFinding):
@@ -78,9 +82,6 @@ class DeepAnalysisFinding(BaseFinding):
"performance",
"logic",
] = Field(description="Issue category")
- confidence: float = Field(
- 0.85, ge=0.0, le=1.0, description="AI's confidence in this finding (0.0-1.0)"
- )
verification_note: str | None = Field(
None, description="What evidence is missing or couldn't be verified"
)
@@ -315,21 +316,11 @@ class OrchestratorFinding(BaseModel):
description="Issue severity level"
)
suggestion: str | None = Field(None, description="How to fix this issue")
- confidence: float = Field(
- 0.85,
- ge=0.0,
- le=1.0,
- description="Confidence (0.0-1.0 or 0-100, normalized to 0.0-1.0)",
+ evidence: str | None = Field(
+ None,
+ description="Actual code snippet proving the issue exists. Required for validation.",
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range (accepts 0-100 or 0.0-1.0)."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class OrchestratorReviewResponse(BaseModel):
"""Complete response schema for orchestrator PR review."""
@@ -355,9 +346,6 @@ class LogicFinding(BaseFinding):
category: Literal["logic"] = Field(
default="logic", description="Always 'logic' for logic findings"
)
- confidence: float = Field(
- 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)"
- )
example_input: str | None = Field(
None, description="Concrete input that triggers the bug"
)
@@ -366,14 +354,6 @@ class LogicFinding(BaseFinding):
None, description="What the code should produce"
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class CodebaseFitFinding(BaseFinding):
"""A codebase fit finding from the codebase fit review agent."""
@@ -381,9 +361,6 @@ class CodebaseFitFinding(BaseFinding):
category: Literal["codebase_fit"] = Field(
default="codebase_fit", description="Always 'codebase_fit' for fit findings"
)
- confidence: float = Field(
- 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)"
- )
existing_code: str | None = Field(
None, description="Reference to existing code that should be used instead"
)
@@ -391,14 +368,6 @@ class CodebaseFitFinding(BaseFinding):
None, description="Description of the established pattern being violated"
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class ParallelOrchestratorFinding(BaseModel):
"""A finding from the parallel orchestrator with source agent tracking."""
@@ -423,8 +392,9 @@ class ParallelOrchestratorFinding(BaseModel):
severity: Literal["critical", "high", "medium", "low"] = Field(
description="Issue severity level"
)
- confidence: float = Field(
- 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)"
+ evidence: str | None = Field(
+ None,
+ description="Actual code snippet proving the issue exists. Required for validation.",
)
suggested_fix: str | None = Field(None, description="How to fix this issue")
fixable: bool = Field(False, description="Whether this can be auto-fixed")
@@ -436,14 +406,6 @@ class ParallelOrchestratorFinding(BaseModel):
False, description="Whether multiple agents agreed on this finding"
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class AgentAgreement(BaseModel):
"""Tracks agreement between agents on findings."""
@@ -496,22 +458,14 @@ class ResolutionVerification(BaseModel):
status: Literal["resolved", "partially_resolved", "unresolved", "cant_verify"] = (
Field(description="Resolution status after AI verification")
)
- confidence: float = Field(
- 0.85, ge=0.0, le=1.0, description="Confidence in the resolution status"
+ evidence: str = Field(
+ min_length=1,
+ description="Actual code snippet showing the resolution status. Required.",
)
- evidence: str = Field(description="What evidence supports this resolution status")
resolution_notes: str | None = Field(
None, description="Detailed notes on how the issue was addressed"
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class ParallelFollowupFinding(BaseModel):
"""A finding from parallel follow-up review with source agent tracking."""
@@ -534,8 +488,9 @@ class ParallelFollowupFinding(BaseModel):
severity: Literal["critical", "high", "medium", "low"] = Field(
description="Issue severity level"
)
- confidence: float = Field(
- 0.85, ge=0.0, le=1.0, description="Confidence in this finding (0.0-1.0)"
+ evidence: str | None = Field(
+ None,
+ description="Actual code snippet proving the issue exists. Required for validation.",
)
suggested_fix: str | None = Field(None, description="How to fix this issue")
fixable: bool = Field(False, description="Whether this can be auto-fixed")
@@ -546,14 +501,6 @@ class ParallelFollowupFinding(BaseModel):
None, description="ID of related previous finding if this is a regression"
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class CommentAnalysis(BaseModel):
"""Analysis of a contributor or AI comment."""
@@ -640,6 +587,9 @@ class FindingValidationResult(BaseModel):
The finding-validator agent uses this to report whether a previous finding
is a genuine issue or a false positive that should be dismissed.
+
+ EVIDENCE-BASED VALIDATION: No confidence scores - validation is binary.
+ Either the evidence shows the issue exists, or it doesn't.
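+
+    Example payload (illustrative values only)::
+
+        {
+            "finding_id": "finding-003",
+            "validation_status": "dismissed_false_positive",
+            "code_evidence": "if user is None: raise ValueError('missing user')",
+            "line_range": [41, 44],
+            "explanation": "The finding claimed a missing None check; the snippet shows the check exists, so the issue does not apply.",
+            "evidence_verified_in_file": true
+        }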
"""
finding_id: str = Field(description="ID of the finding being validated")
@@ -648,16 +598,17 @@ class FindingValidationResult(BaseModel):
] = Field(
description=(
"Validation result: "
- "confirmed_valid = issue IS real, keep as unresolved; "
- "dismissed_false_positive = original finding was incorrect, remove; "
- "needs_human_review = cannot determine with confidence"
+ "confirmed_valid = code evidence proves issue IS real; "
+ "dismissed_false_positive = code evidence proves issue does NOT exist; "
+ "needs_human_review = cannot find definitive evidence either way"
)
)
code_evidence: str = Field(
min_length=1,
description=(
"REQUIRED: Exact code snippet examined from the file. "
- "Must be actual code, not a description."
+ "Must be actual code copy-pasted from the file, not a description. "
+ "This is the proof that determines the validation status."
),
)
line_range: tuple[int, int] = Field(
@@ -666,27 +617,18 @@ class FindingValidationResult(BaseModel):
explanation: str = Field(
min_length=20,
description=(
- "Detailed explanation of why the finding is valid/invalid. "
- "Must reference specific code and explain the reasoning."
+ "Detailed explanation connecting the code_evidence to the validation_status. "
+ "Must explain: (1) what the original finding claimed, (2) what the actual code shows, "
+ "(3) why this proves/disproves the issue."
),
)
- confidence: float = Field(
- ge=0.0,
- le=1.0,
+ evidence_verified_in_file: bool = Field(
description=(
- "Confidence in the validation result (0.0-1.0). "
- "Must be >= 0.80 to dismiss as false positive, >= 0.70 to confirm valid."
- ),
+ "True if the code_evidence was verified to exist at the specified line_range. "
+ "False if the code couldn't be found (indicates hallucination in original finding)."
+ )
)
- @field_validator("confidence", mode="before")
- @classmethod
- def normalize_confidence(cls, v: int | float) -> float:
- """Normalize confidence to 0.0-1.0 range (accepts 0-100 or 0.0-1.0)."""
- if v > 1:
- return v / 100.0
- return float(v)
-
class FindingValidationResponse(BaseModel):
"""Complete response from the finding-validator agent."""
diff --git a/apps/backend/runners/github/services/response_parsers.py b/apps/backend/runners/github/services/response_parsers.py
index db318463d2..2df83ea06b 100644
--- a/apps/backend/runners/github/services/response_parsers.py
+++ b/apps/backend/runners/github/services/response_parsers.py
@@ -33,8 +33,9 @@
TriageResult,
)
-# Confidence threshold for filtering findings (GitHub Copilot standard)
-CONFIDENCE_THRESHOLD = 0.80
+# Evidence-based validation replaces confidence scoring
+# Findings without evidence are filtered out instead of using confidence thresholds
+MIN_EVIDENCE_LENGTH = 20 # Minimum chars for evidence to be considered valid
class ResponseParser:
@@ -65,9 +66,13 @@ def parse_scan_result(response_text: str) -> dict:
@staticmethod
def parse_review_findings(
- response_text: str, apply_confidence_filter: bool = True
+ response_text: str, require_evidence: bool = True
) -> list[PRReviewFinding]:
- """Parse findings from AI response with optional confidence filtering."""
+ """Parse findings from AI response with optional evidence validation.
+
+ Evidence-based validation: Instead of confidence scores, findings
+ require actual code evidence proving the issue exists.
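+
+    Concretely (illustrative): a finding's 'evidence' (or 'code_snippet') field must
+    be at least MIN_EVIDENCE_LENGTH characters after stripping, otherwise the finding
+    is dropped when require_evidence is True.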
+ """
findings = []
try:
@@ -77,14 +82,14 @@ def parse_review_findings(
if json_match:
findings_data = json.loads(json_match.group(1))
for i, f in enumerate(findings_data):
- # Get confidence (default to 0.85 if not provided for backward compat)
- confidence = float(f.get("confidence", 0.85))
+ # Get evidence (code snippet proving the issue)
+ evidence = f.get("evidence") or f.get("code_snippet") or ""
- # Apply confidence threshold filter
- if apply_confidence_filter and confidence < CONFIDENCE_THRESHOLD:
+ # Apply evidence-based validation
+ if require_evidence and len(evidence.strip()) < MIN_EVIDENCE_LENGTH:
print(
f"[AI] Dropped finding '{f.get('title', 'unknown')}': "
- f"confidence {confidence:.2f} < {CONFIDENCE_THRESHOLD}",
+ f"insufficient evidence ({len(evidence.strip())} chars < {MIN_EVIDENCE_LENGTH})",
flush=True,
)
continue
@@ -105,8 +110,8 @@ def parse_review_findings(
end_line=f.get("end_line"),
suggested_fix=f.get("suggested_fix"),
fixable=f.get("fixable", False),
- # NEW: Support verification and redundancy fields
- confidence=confidence,
+ # Evidence-based validation fields
+ evidence=evidence if evidence.strip() else None,
verification_note=f.get("verification_note"),
redundant_with=f.get("redundant_with"),
)
diff --git a/apps/backend/runners/github/services/review_tools.py b/apps/backend/runners/github/services/review_tools.py
index 881d8353cf..1a53a6b126 100644
--- a/apps/backend/runners/github/services/review_tools.py
+++ b/apps/backend/runners/github/services/review_tools.py
@@ -140,7 +140,9 @@ async def spawn_security_review(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
# Parse findings
@@ -223,7 +225,9 @@ async def spawn_quality_review(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
findings = _parse_findings_from_response(result_text, source="quality_agent")
@@ -316,7 +320,9 @@ async def spawn_deep_analysis(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
findings = _parse_findings_from_response(result_text, source="deep_analysis")
diff --git a/apps/backend/runners/github/services/sdk_utils.py b/apps/backend/runners/github/services/sdk_utils.py
index 0e6da74f30..7471f16360 100644
--- a/apps/backend/runners/github/services/sdk_utils.py
+++ b/apps/backend/runners/github/services/sdk_utils.py
@@ -235,8 +235,9 @@ async def process_sdk_stream(
if on_tool_use:
on_tool_use(tool_name, tool_id, tool_input)
- # Collect text
- if hasattr(block, "text"):
+ # Collect text - must check block type since only TextBlock has .text
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
# Always print text content preview (not just in DEBUG_MODE)
text_preview = block.text[:500].replace("\n", " ").strip()
diff --git a/apps/backend/runners/github/services/triage_engine.py b/apps/backend/runners/github/services/triage_engine.py
index 2508207012..57a6b04310 100644
--- a/apps/backend/runners/github/services/triage_engine.py
+++ b/apps/backend/runners/github/services/triage_engine.py
@@ -87,7 +87,9 @@ async def triage_single_issue(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
response_text += block.text
return self.parser.parse_triage_result(
diff --git a/apps/backend/runners/gitlab/runner.py b/apps/backend/runners/gitlab/runner.py
index c2a0be32a5..d4f61827bb 100644
--- a/apps/backend/runners/gitlab/runner.py
+++ b/apps/backend/runners/gitlab/runner.py
@@ -26,8 +26,10 @@
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
-# Load .env file
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
env_file = Path(__file__).parent.parent.parent / ".env"
if env_file.exists():
diff --git a/apps/backend/runners/gitlab/services/mr_review_engine.py b/apps/backend/runners/gitlab/services/mr_review_engine.py
index d1679a4b62..ef8ef9aaf0 100644
--- a/apps/backend/runners/gitlab/services/mr_review_engine.py
+++ b/apps/backend/runners/gitlab/services/mr_review_engine.py
@@ -234,7 +234,9 @@ async def run_review(
msg_type = type(msg).__name__
if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
result_text += block.text
self._report_progress(
diff --git a/apps/backend/runners/ideation_runner.py b/apps/backend/runners/ideation_runner.py
index 63714a372f..9b91445601 100644
--- a/apps/backend/runners/ideation_runner.py
+++ b/apps/backend/runners/ideation_runner.py
@@ -26,8 +26,10 @@
# Add auto-claude to path
sys.path.insert(0, str(Path(__file__).parent.parent))
-# Load .env file from auto-claude/ directory
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
env_file = Path(__file__).parent.parent / ".env"
if env_file.exists():
@@ -94,8 +96,8 @@ def main():
parser.add_argument(
"--model",
type=str,
- default="claude-opus-4-5-20251101",
- help="Model to use (default: claude-opus-4-5-20251101)",
+ default="sonnet", # Changed from "opus" (fix #433)
+ help="Model to use (haiku, sonnet, opus, or full model ID)",
)
parser.add_argument(
"--thinking-level",
diff --git a/apps/backend/runners/insights_runner.py b/apps/backend/runners/insights_runner.py
index a2de9f9408..bd4bf362c4 100644
--- a/apps/backend/runners/insights_runner.py
+++ b/apps/backend/runners/insights_runner.py
@@ -15,8 +15,10 @@
# Add auto-claude to path
sys.path.insert(0, str(Path(__file__).parent.parent))
-# Load .env file from auto-claude/ directory
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
env_file = Path(__file__).parent.parent / ".env"
if env_file.exists():
@@ -39,6 +41,7 @@
debug_section,
debug_success,
)
+from phase_config import resolve_model_id
def load_project_context(project_dir: str) -> str:
@@ -132,7 +135,7 @@ async def run_with_sdk(
project_dir: str,
message: str,
history: list,
- model: str = "claude-sonnet-4-5-20250929",
+ model: str = "sonnet", # Shorthand - resolved via API Profile if configured
thinking_level: str = "medium",
) -> None:
"""Run the chat using Claude SDK with streaming."""
@@ -180,7 +183,7 @@ async def run_with_sdk(
# Create Claude SDK client with appropriate settings for insights
client = ClaudeSDKClient(
options=ClaudeAgentOptions(
- model=model, # Use configured model
+ model=resolve_model_id(model), # Resolve via API Profile if configured
system_prompt=system_prompt,
allowed_tools=[
"Read",
@@ -336,8 +339,8 @@ def main():
)
parser.add_argument(
"--model",
- default="claude-sonnet-4-5-20250929",
- help="Claude model ID (default: claude-sonnet-4-5-20250929)",
+ default="sonnet",
+ help="Model to use (haiku, sonnet, opus, or full model ID)",
)
parser.add_argument(
"--thinking-level",
diff --git a/apps/backend/runners/roadmap/models.py b/apps/backend/runners/roadmap/models.py
index cc7a1f5f8b..377f5cfacc 100644
--- a/apps/backend/runners/roadmap/models.py
+++ b/apps/backend/runners/roadmap/models.py
@@ -23,6 +23,6 @@ class RoadmapConfig:
project_dir: Path
output_dir: Path
- model: str = "claude-opus-4-5-20251101"
+ model: str = "sonnet" # Changed from "opus" (fix #433)
refresh: bool = False # Force regeneration even if roadmap exists
enable_competitor_analysis: bool = False # Enable competitor analysis phase
diff --git a/apps/backend/runners/roadmap/orchestrator.py b/apps/backend/runners/roadmap/orchestrator.py
index b7a9803af1..b49ca2c1cb 100644
--- a/apps/backend/runners/roadmap/orchestrator.py
+++ b/apps/backend/runners/roadmap/orchestrator.py
@@ -27,7 +27,7 @@ def __init__(
self,
project_dir: Path,
output_dir: Path | None = None,
- model: str = "claude-opus-4-5-20251101",
+ model: str = "sonnet", # Changed from "opus" (fix #433)
thinking_level: str = "medium",
refresh: bool = False,
enable_competitor_analysis: bool = False,
diff --git a/apps/backend/runners/roadmap_runner.py b/apps/backend/runners/roadmap_runner.py
index 88f157b12c..06625add7e 100644
--- a/apps/backend/runners/roadmap_runner.py
+++ b/apps/backend/runners/roadmap_runner.py
@@ -20,8 +20,10 @@
# Add auto-claude to path
sys.path.insert(0, str(Path(__file__).parent.parent))
-# Load .env file from auto-claude/ directory
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
env_file = Path(__file__).parent.parent / ".env"
if env_file.exists():
@@ -55,8 +57,8 @@ def main():
parser.add_argument(
"--model",
type=str,
- default="claude-opus-4-5-20251101",
- help="Model to use (default: claude-opus-4-5-20251101)",
+ default="sonnet", # Changed from "opus" (fix #433)
+ help="Model to use (haiku, sonnet, opus, or full model ID)",
)
parser.add_argument(
"--thinking-level",
diff --git a/apps/backend/runners/spec_runner.py b/apps/backend/runners/spec_runner.py
index 0bda6db115..30adbf3fa6 100644
--- a/apps/backend/runners/spec_runner.py
+++ b/apps/backend/runners/spec_runner.py
@@ -26,11 +26,11 @@
- Risk factors and edge cases
Usage:
- python auto-claude/spec_runner.py --task "Add user authentication"
- python auto-claude/spec_runner.py --interactive
- python auto-claude/spec_runner.py --continue 001-feature
- python auto-claude/spec_runner.py --task "Fix button color" --complexity simple
- python auto-claude/spec_runner.py --task "Simple fix" --no-ai-assessment
+ python runners/spec_runner.py --task "Add user authentication"
+ python runners/spec_runner.py --interactive
+ python runners/spec_runner.py --continue 001-feature
+ python runners/spec_runner.py --task "Fix button color" --complexity simple
+ python runners/spec_runner.py --task "Simple fix" --no-ai-assessment
"""
import sys
@@ -81,8 +81,10 @@
# Add auto-claude to path (parent of runners/)
sys.path.insert(0, str(Path(__file__).parent.parent))
-# Load .env file
-from dotenv import load_dotenv
+# Load .env file with centralized error handling
+from cli.utils import import_dotenv
+
+load_dotenv = import_dotenv()
env_file = Path(__file__).parent.parent / ".env"
dev_env_file = Path(__file__).parent.parent.parent / "dev" / "auto-claude" / ".env"
@@ -198,9 +200,21 @@ def main():
default=None,
help="Base branch for creating worktrees (default: auto-detect or current branch)",
)
+ parser.add_argument(
+ "--direct",
+ action="store_true",
+ help="Build directly in project without worktree isolation (default: use isolated worktree)",
+ )
args = parser.parse_args()
+ # Warn user about direct mode risks
+ if args.direct:
+ print_status(
+ "Direct mode: Building in project directory without worktree isolation",
+ "warning",
+ )
+
# Handle task from file if provided
task_description = args.task
if args.task_file:
@@ -328,6 +342,10 @@ def main():
if args.base_branch:
run_cmd.extend(["--base-branch", args.base_branch])
+ # Pass --direct flag if specified (skip worktree isolation)
+ if args.direct:
+ run_cmd.append("--direct")
+
# Note: Model configuration for subsequent phases (planning, coding, qa)
# is read from task_metadata.json by run.py, so we don't pass it here.
# This allows per-phase configuration when using Auto profile.
diff --git a/apps/backend/security/__init__.py b/apps/backend/security/__init__.py
index 9b389373b6..b26311d292 100644
--- a/apps/backend/security/__init__.py
+++ b/apps/backend/security/__init__.py
@@ -62,7 +62,9 @@
validate_chmod_command,
validate_dropdb_command,
validate_dropuser_command,
+ validate_git_command,
validate_git_commit,
+ validate_git_config,
validate_init_script,
validate_kill_command,
validate_killall_command,
@@ -93,7 +95,9 @@
"validate_chmod_command",
"validate_rm_command",
"validate_init_script",
+ "validate_git_command",
"validate_git_commit",
+ "validate_git_config",
"validate_dropdb_command",
"validate_dropuser_command",
"validate_psql_command",
diff --git a/apps/backend/security/constants.py b/apps/backend/security/constants.py
new file mode 100644
index 0000000000..3ddbca3002
--- /dev/null
+++ b/apps/backend/security/constants.py
@@ -0,0 +1,16 @@
+"""
+Security Constants
+==================
+
+Shared constants for the security module.
+"""
+
+# Environment variable name for the project directory
+# Set by agents (coder.py, loop.py) at startup to ensure security hooks
+# can find the correct project directory even in worktree mode.
+PROJECT_DIR_ENV_VAR = "AUTO_CLAUDE_PROJECT_DIR"
+
+# Security configuration filenames
+# These are the files that control which commands are allowed to run.
+ALLOWLIST_FILENAME = ".auto-claude-allowlist"
+PROFILE_FILENAME = ".auto-claude-security.json"
diff --git a/apps/backend/security/git_validators.py b/apps/backend/security/git_validators.py
index 5a75ad39f1..5c21d32909 100644
--- a/apps/backend/security/git_validators.py
+++ b/apps/backend/security/git_validators.py
@@ -2,7 +2,9 @@
Git Validators
==============
-Validators for git operations (commit with secret scanning).
+Validators for git operations:
+- Commit with secret scanning
+- Config protection (prevent setting test users)
"""
import shlex
@@ -10,8 +12,203 @@
from .validation_models import ValidationResult
+# =============================================================================
+# BLOCKED GIT CONFIG PATTERNS
+# =============================================================================
-def validate_git_commit(command_string: str) -> ValidationResult:
+# Git config keys that agents must NOT modify
+# These are identity settings that should inherit from the user's global config
+#
+# NOTE: This validation covers command-line arguments (git config, git -c).
+# Environment variables (GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, GIT_COMMITTER_NAME,
+# GIT_COMMITTER_EMAIL) are NOT validated here as they require pre-execution
+# environment filtering, which is handled at the sandbox/hook level.
+BLOCKED_GIT_CONFIG_KEYS = {
+ "user.name",
+ "user.email",
+ "author.name",
+ "author.email",
+ "committer.name",
+ "committer.email",
+}
+
+
+def validate_git_config(command_string: str) -> ValidationResult:
+ """
+ Validate git config commands - block identity changes.
+
+    Agents should not set user.name, user.email, etc., because doing so:
+ 1. Breaks commit attribution
+ 2. Can create fake "Test User" identities
+ 3. Overrides the user's legitimate git identity
+
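+    Examples (illustrative)::
+
+        >>> validate_git_config('git config user.name "Test User"')[0]
+        False
+        >>> validate_git_config("git config --get user.email")[0]
+        True
+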
+ Args:
+ command_string: The full git command string
+
+ Returns:
+ Tuple of (is_valid, error_message)
+ """
+ try:
+ tokens = shlex.split(command_string)
+ except ValueError:
+ return False, "Could not parse git command" # Fail closed on parse errors
+
+ if len(tokens) < 2 or tokens[0] != "git" or tokens[1] != "config":
+ return True, "" # Not a git config command
+
+ # Check for read-only operations first - these are always allowed
+ # --get, --get-all, --get-regexp, --list are all read operations
+ read_only_flags = {"--get", "--get-all", "--get-regexp", "--list", "-l"}
+ for token in tokens[2:]:
+ if token in read_only_flags:
+ return True, "" # Read operation, allow it
+
+ # Extract the config key from the command
+ # git config [options] [value] - key is typically after config and any options
+ config_key = None
+ for token in tokens[2:]:
+ # Skip options (start with -)
+ if token.startswith("-"):
+ continue
+ # First non-option token is the config key
+ config_key = token.lower()
+ break
+
+ if not config_key:
+ return True, "" # No config key specified (e.g., git config --list)
+
+    # Check if the exact config key is blocked (set membership, no loop needed)
+    if config_key in BLOCKED_GIT_CONFIG_KEYS:
+        return False, (
+            f"BLOCKED: Cannot modify git identity configuration\n\n"
+            f"You attempted to set '{config_key}' which is not allowed.\n\n"
+            f"WHY: Git identity (user.name, user.email) must inherit from the user's "
+            f"global git configuration. Setting fake identities like 'Test User' breaks "
+            f"commit attribution and causes serious issues.\n\n"
+            f"WHAT TO DO: Simply commit without setting any user configuration. "
+            f"The repository will use the correct identity automatically."
+        )
+
+ return True, ""
+
+
+def validate_git_inline_config(tokens: list[str]) -> ValidationResult:
+ """
+ Check for blocked config keys passed via git -c flag.
+
+ Git allows inline config with: git -c key=value
+ This bypasses 'git config' validation, so we must check all git commands
+ for -c flags containing blocked identity keys.
+
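+    Example (illustrative)::
+
+        >>> import shlex
+        >>> validate_git_inline_config(shlex.split("git -c user.email=x@y.com commit"))[0]
+        False
+        >>> validate_git_inline_config(shlex.split("git -c core.pager=cat log"))[0]
+        True
+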
+ Args:
+ tokens: Parsed command tokens
+
+ Returns:
+ Tuple of (is_valid, error_message)
+ """
+ i = 1 # Start after 'git'
+ while i < len(tokens):
+ token = tokens[i]
+
+ # Check for -c flag (can be "-c key=value" or "-c" "key=value")
+ if token == "-c":
+ # Next token should be the key=value
+ if i + 1 < len(tokens):
+ config_pair = tokens[i + 1]
+ # Extract the key from key=value
+ if "=" in config_pair:
+ config_key = config_pair.split("=", 1)[0].lower()
+ if config_key in BLOCKED_GIT_CONFIG_KEYS:
+ return False, (
+ f"BLOCKED: Cannot set git identity via -c flag\n\n"
+ f"You attempted to use '-c {config_pair}' which sets a blocked "
+ f"identity configuration.\n\n"
+ f"WHY: Git identity (user.name, user.email) must inherit from the "
+ f"user's global git configuration. Setting fake identities breaks "
+ f"commit attribution and causes serious issues.\n\n"
+ f"WHAT TO DO: Remove the -c flag and commit normally. "
+ f"The repository will use the correct identity automatically."
+ )
+ i += 2 # Skip -c and its value
+ continue
+ elif token.startswith("-c"):
+ # Handle -ckey=value format (no space)
+ config_pair = token[2:] # Remove "-c" prefix
+ if "=" in config_pair:
+ config_key = config_pair.split("=", 1)[0].lower()
+ if config_key in BLOCKED_GIT_CONFIG_KEYS:
+ return False, (
+ f"BLOCKED: Cannot set git identity via -c flag\n\n"
+ f"You attempted to use '{token}' which sets a blocked "
+ f"identity configuration.\n\n"
+ f"WHY: Git identity (user.name, user.email) must inherit from the "
+ f"user's global git configuration. Setting fake identities breaks "
+ f"commit attribution and causes serious issues.\n\n"
+ f"WHAT TO DO: Remove the -c flag and commit normally. "
+ f"The repository will use the correct identity automatically."
+ )
+
+ i += 1
+
+ return True, ""
+
+
+def validate_git_command(command_string: str) -> ValidationResult:
+ """
+ Main git validator that checks all git security rules.
+
+ Currently validates:
+ - git -c: Block identity changes via inline config on ANY git command
+ - git config: Block identity changes
+ - git commit: Run secret scanning
+
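+    Examples (illustrative)::
+
+        >>> validate_git_command('git -c user.name=Bot commit -m "wip"')[0]
+        False
+        >>> validate_git_command("git status")[0]
+        True
+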
+ Args:
+ command_string: The full git command string
+
+ Returns:
+ Tuple of (is_valid, error_message)
+ """
+ try:
+ tokens = shlex.split(command_string)
+ except ValueError:
+ return False, "Could not parse git command"
+
+ if not tokens or tokens[0] != "git":
+ return True, ""
+
+ if len(tokens) < 2:
+ return True, "" # Just "git" with no subcommand
+
+ # Check for blocked -c flags on ANY git command (security bypass prevention)
+ is_valid, error_msg = validate_git_inline_config(tokens)
+ if not is_valid:
+ return is_valid, error_msg
+
+    # Find the actual subcommand, skipping global options like -c, -C, --git-dir.
+    # Options that take a separate value ("-c key=val", "-C <path>") must have that
+    # value skipped too, or it would be misread as the subcommand (e.g.
+    # "git -c core.pager=cat commit" would otherwise never reach secret scanning).
+    options_with_values = {"-c", "-C", "--git-dir", "--work-tree", "--namespace"}
+    subcommand = None
+    i = 1
+    while i < len(tokens):
+        if tokens[i] in options_with_values:
+            i += 2
+        elif tokens[i].startswith("-"):
+            i += 1
+        else:
+            subcommand = tokens[i]
+            break
+
+ if not subcommand:
+ return True, "" # No subcommand found
+
+ # Check git config commands
+ if subcommand == "config":
+ return validate_git_config(command_string)
+
+ # Check git commit commands (secret scanning)
+ if subcommand == "commit":
+ return validate_git_commit_secrets(command_string)
+
+ return True, ""
+
+
+def validate_git_commit_secrets(command_string: str) -> ValidationResult:
"""
Validate git commit commands - run secret scan before allowing commit.
@@ -99,3 +296,8 @@ def validate_git_commit(command_string: str) -> ValidationResult:
)
return False, "\n".join(error_lines)
+
+
+# Backwards compatibility alias - the registry uses this name
+# Now delegates to the comprehensive validator
+validate_git_commit = validate_git_command
diff --git a/apps/backend/security/hooks.py b/apps/backend/security/hooks.py
index 35152d4433..4bc7328d3a 100644
--- a/apps/backend/security/hooks.py
+++ b/apps/backend/security/hooks.py
@@ -66,10 +66,20 @@ async def bash_security_hook(
return {}
# Get the working directory from context or use current directory
- # In the actual client, this would be set by the ClaudeSDKClient
- cwd = os.getcwd()
- if context and hasattr(context, "cwd"):
+ # Priority:
+ # 1. Environment variable PROJECT_DIR_ENV_VAR (set by agent on startup)
+ # 2. input_data cwd (passed by SDK in the tool call)
+ # 3. Context cwd (should be set by ClaudeSDKClient but sometimes isn't)
+ # 4. Current working directory (fallback, may be incorrect in worktree mode)
+ from .constants import PROJECT_DIR_ENV_VAR
+
+ cwd = os.environ.get(PROJECT_DIR_ENV_VAR)
+ if not cwd:
+ cwd = input_data.get("cwd")
+ if not cwd and context and hasattr(context, "cwd"):
cwd = context.cwd
+ if not cwd:
+ cwd = os.getcwd()
# Get or create security profile
# Note: In actual use, spec_dir would be passed through context
diff --git a/apps/backend/security/parser.py b/apps/backend/security/parser.py
index 1b8ead069a..1c51999866 100644
--- a/apps/backend/security/parser.py
+++ b/apps/backend/security/parser.py
@@ -4,11 +4,137 @@
Functions for parsing and extracting commands from shell command strings.
Handles compound commands, pipes, subshells, and various shell constructs.
+
+Windows Compatibility Note:
+--------------------------
+On Windows, commands containing paths with backslashes can cause shlex.split()
+to fail (e.g., incomplete commands with unclosed quotes). This module includes
+a fallback parser that extracts command names even from malformed commands,
+ensuring security validation can still proceed.
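+
+For example (illustrative), a command containing a drive-letter path such as
+    npm test --config "C:\\configs\\jest.config.js"
+is routed straight to the fallback parser, which still extracts ["npm"] for validation.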
"""
-import os
import re
import shlex
+from pathlib import PurePosixPath, PureWindowsPath
+
+
+def _cross_platform_basename(path: str) -> str:
+ """
+ Extract the basename from a path in a cross-platform way.
+
+ Handles both Windows paths (C:\\dir\\cmd.exe) and POSIX paths (/dir/cmd)
+ regardless of the current platform. This is critical for running tests
+ on Linux CI while handling Windows-style paths.
+
+ Args:
+ path: A file path string (Windows or POSIX format)
+
+ Returns:
+ The basename of the path (e.g., "python.exe" from "C:\\Python312\\python.exe")
+ """
+ # Strip surrounding quotes if present
+ path = path.strip("'\"")
+
+ # Check if this looks like a Windows path (contains backslash or drive letter)
+ if "\\" in path or (len(path) >= 2 and path[1] == ":"):
+ # Use PureWindowsPath to handle Windows paths on any platform
+ return PureWindowsPath(path).name
+
+ # For POSIX paths or simple command names, use PurePosixPath
+ # (os.path.basename works but PurePosixPath is more explicit)
+ return PurePosixPath(path).name
+
+
+def _fallback_extract_commands(command_string: str) -> list[str]:
+ """
+ Fallback command extraction when shlex.split() fails.
+
+ Uses regex to extract command names from potentially malformed commands.
+ This is more permissive than shlex but ensures we can at least identify
+ the commands being executed for security validation.
+
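+    Example (illustrative)::
+
+        >>> _fallback_extract_commands('git commit -m "unclosed quote')
+        ['git']
+        >>> _fallback_extract_commands("npm ci && node build.js")
+        ['npm', 'node']
+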
+ Args:
+ command_string: The command string to parse
+
+ Returns:
+ List of command names extracted from the string
+ """
+ commands = []
+
+ # Shell keywords to skip
+ shell_keywords = {
+ "if",
+ "then",
+ "else",
+ "elif",
+ "fi",
+ "for",
+ "while",
+ "until",
+ "do",
+ "done",
+ "case",
+ "esac",
+ "in",
+ "function",
+ }
+
+ # First, split by common shell operators
+ # This regex splits on &&, ||, |, ; while being careful about quotes
+ # We're being permissive here since shlex already failed
+ parts = re.split(r"\s*(?:&&|\|\||\|)\s*|;\s*", command_string)
+
+ for part in parts:
+ part = part.strip()
+ if not part:
+ continue
+
+ # Skip variable assignments at the start (VAR=value cmd)
+ while re.match(r"^[A-Za-z_][A-Za-z0-9_]*=\S*\s+", part):
+ part = re.sub(r"^[A-Za-z_][A-Za-z0-9_]*=\S*\s+", "", part)
+
+ if not part:
+ continue
+
+ # Strategy: Extract command from the BEGINNING of the part
+ # Handle various formats:
+ # - Simple: python3, npm, git
+ # - Unix path: /usr/bin/python
+ # - Windows path: C:\Python312\python.exe
+ # - Quoted with spaces: "C:\Program Files\python.exe"
+
+ # Extract first token, handling quoted strings with spaces
+ first_token_match = re.match(r'^(?:"([^"]+)"|\'([^\']+)\'|([^\s]+))', part)
+ if not first_token_match:
+ continue
+
+ # Pick whichever capture group matched (double-quoted, single-quoted, or unquoted)
+ first_token = (
+ first_token_match.group(1)
+ or first_token_match.group(2)
+ or first_token_match.group(3)
+ )
+
+ # Now extract just the command name from this token
+ # Handle Windows paths (C:\dir\cmd.exe) and Unix paths (/dir/cmd)
+ # Use cross-platform basename for reliable path handling on any OS
+ cmd = _cross_platform_basename(first_token)
+
+        # Strip common executable/script extensions (.exe, .cmd, .bat, .ps1, .sh)
+ cmd = re.sub(r"\.(exe|cmd|bat|ps1|sh)$", "", cmd, flags=re.IGNORECASE)
+
+ # Clean up any remaining quotes or special chars at the start
+ cmd = re.sub(r'^["\'\\/]+', "", cmd)
+
+ # Skip tokens that look like function calls or code fragments (not shell commands)
+ # These appear when splitting on semicolons inside malformed quoted strings
+ if "(" in cmd or ")" in cmd or "." in cmd:
+ continue
+
+ if cmd and cmd.lower() not in shell_keywords:
+ commands.append(cmd)
+
+ return commands
def split_command_segments(command_string: str) -> list[str]:
@@ -32,13 +158,46 @@ def split_command_segments(command_string: str) -> list[str]:
return result
+def _contains_windows_path(command_string: str) -> bool:
+ """
+ Check if a command string contains Windows-style paths.
+
+ Windows paths with backslashes cause issues with shlex.split() because
+ backslashes are interpreted as escape characters in POSIX mode.
+
+ Args:
+ command_string: The command string to check
+
+ Returns:
+ True if Windows paths are detected
+ """
+ # Pattern matches:
+ # - Drive letter paths: C:\, D:\, etc.
+ # - Backslash followed by a path component (2+ chars to avoid escape sequences like \n, \t)
+ # The second char must be alphanumeric, underscore, or another path separator
+ # This avoids false positives on escape sequences which are single-char after backslash
+ return bool(re.search(r"[A-Za-z]:\\|\\[A-Za-z][A-Za-z0-9_\\/]", command_string))
+
+
def extract_commands(command_string: str) -> list[str]:
"""
Extract command names from a shell command string.
Handles pipes, command chaining (&&, ||, ;), and subshells.
Returns the base command names (without paths).
+
+ On Windows or when commands contain malformed quoting (common with
+ Windows paths in bash-style commands), falls back to regex-based
+ extraction to ensure security validation can proceed.
"""
+ # If command contains Windows paths, use fallback parser directly
+ # because shlex.split() interprets backslashes as escape characters
+ if _contains_windows_path(command_string):
+ fallback_commands = _fallback_extract_commands(command_string)
+ if fallback_commands:
+ return fallback_commands
+ # Continue with shlex if fallback found nothing
+
commands = []
# Split on semicolons that aren't inside quotes
@@ -53,7 +212,12 @@ def extract_commands(command_string: str) -> list[str]:
tokens = shlex.split(segment)
except ValueError:
# Malformed command (unclosed quotes, etc.)
- # Return empty to trigger block (fail-safe)
+ # This is common on Windows with backslash paths in quoted strings
+ # Use fallback parser instead of blocking
+ fallback_commands = _fallback_extract_commands(command_string)
+ if fallback_commands:
+ return fallback_commands
+ # If fallback also found nothing, return empty to trigger block
return []
if not tokens:
@@ -106,7 +270,8 @@ def extract_commands(command_string: str) -> list[str]:
if expect_command:
# Extract the base command name (handle paths like /usr/bin/python)
- cmd = os.path.basename(token)
+ # Use cross-platform basename for Windows paths on Linux CI
+ cmd = _cross_platform_basename(token)
commands.append(cmd)
expect_command = False
diff --git a/apps/backend/security/profile.py b/apps/backend/security/profile.py
index da75cff174..a3087a65bb 100644
--- a/apps/backend/security/profile.py
+++ b/apps/backend/security/profile.py
@@ -9,11 +9,12 @@
from pathlib import Path
from project_analyzer import (
- ProjectAnalyzer,
SecurityProfile,
get_or_create_profile,
)
+from .constants import ALLOWLIST_FILENAME, PROFILE_FILENAME
+
# =============================================================================
# GLOBAL STATE
# =============================================================================
@@ -23,18 +24,33 @@
_cached_project_dir: Path | None = None
_cached_spec_dir: Path | None = None # Track spec directory for cache key
_cached_profile_mtime: float | None = None # Track file modification time
+_cached_allowlist_mtime: float | None = None # Track allowlist modification time
def _get_profile_path(project_dir: Path) -> Path:
"""Get the security profile file path for a project."""
- return project_dir / ProjectAnalyzer.PROFILE_FILENAME
+ return project_dir / PROFILE_FILENAME
+
+
+def _get_allowlist_path(project_dir: Path) -> Path:
+ """Get the allowlist file path for a project."""
+ return project_dir / ALLOWLIST_FILENAME
def _get_profile_mtime(project_dir: Path) -> float | None:
"""Get the modification time of the security profile file, or None if not exists."""
profile_path = _get_profile_path(project_dir)
try:
- return profile_path.stat().st_mtime if profile_path.exists() else None
+ return profile_path.stat().st_mtime
+ except OSError:
+ return None
+
+
+def _get_allowlist_mtime(project_dir: Path) -> float | None:
+ """Get the modification time of the allowlist file, or None if not exists."""
+ allowlist_path = _get_allowlist_path(project_dir)
+ try:
+ return allowlist_path.stat().st_mtime
except OSError:
return None
@@ -49,6 +65,7 @@ def get_security_profile(
- The project directory changes
- The security profile file is created (was None, now exists)
- The security profile file is modified (mtime changed)
+ - The allowlist file is created, modified, or deleted
Args:
project_dir: Project root directory
@@ -57,7 +74,11 @@ def get_security_profile(
Returns:
SecurityProfile for the project
"""
- global _cached_profile, _cached_project_dir, _cached_spec_dir, _cached_profile_mtime
+ global _cached_profile
+ global _cached_project_dir
+ global _cached_spec_dir
+ global _cached_profile_mtime
+ global _cached_allowlist_mtime
project_dir = Path(project_dir).resolve()
resolved_spec_dir = Path(spec_dir).resolve() if spec_dir else None
@@ -68,30 +89,40 @@ def get_security_profile(
and _cached_project_dir == project_dir
and _cached_spec_dir == resolved_spec_dir
):
- # Check if file has been created or modified since caching
- current_mtime = _get_profile_mtime(project_dir)
- # Cache is valid if:
- # - Both are None (file never existed and still doesn't)
- # - Both have same mtime (file unchanged)
- if current_mtime == _cached_profile_mtime:
+ # Check if files have been created or modified since caching
+ current_profile_mtime = _get_profile_mtime(project_dir)
+ current_allowlist_mtime = _get_allowlist_mtime(project_dir)
+
+ # Cache is valid if both mtimes are unchanged
+ if (
+ current_profile_mtime == _cached_profile_mtime
+ and current_allowlist_mtime == _cached_allowlist_mtime
+ ):
return _cached_profile
- # File was created or modified - invalidate cache
- # (This happens when analyzer creates the file after agent starts)
+ # File was created, modified, or deleted - invalidate cache
+ # (This happens when analyzer creates the file after agent starts,
+ # or when user adds/updates the allowlist)
# Analyze and cache
_cached_profile = get_or_create_profile(project_dir, spec_dir)
_cached_project_dir = project_dir
_cached_spec_dir = resolved_spec_dir
_cached_profile_mtime = _get_profile_mtime(project_dir)
+ _cached_allowlist_mtime = _get_allowlist_mtime(project_dir)
return _cached_profile
def reset_profile_cache() -> None:
"""Reset the cached profile (useful for testing or re-analysis)."""
- global _cached_profile, _cached_project_dir, _cached_spec_dir, _cached_profile_mtime
+ global _cached_profile
+ global _cached_project_dir
+ global _cached_spec_dir
+ global _cached_profile_mtime
+ global _cached_allowlist_mtime
_cached_profile = None
_cached_project_dir = None
_cached_spec_dir = None
_cached_profile_mtime = None
+ _cached_allowlist_mtime = None
diff --git a/apps/backend/security/validator.py b/apps/backend/security/validator.py
index 7727f012fa..c1ca28983a 100644
--- a/apps/backend/security/validator.py
+++ b/apps/backend/security/validator.py
@@ -33,7 +33,11 @@
validate_init_script,
validate_rm_command,
)
-from .git_validators import validate_git_commit
+from .git_validators import (
+ validate_git_command,
+ validate_git_commit,
+ validate_git_config,
+)
from .process_validators import (
validate_kill_command,
validate_killall_command,
@@ -60,6 +64,8 @@
"validate_init_script",
# Git validators
"validate_git_commit",
+ "validate_git_command",
+ "validate_git_config",
# Database validators
"validate_dropdb_command",
"validate_dropuser_command",
diff --git a/apps/backend/spec/compaction.py b/apps/backend/spec/compaction.py
index d74b377ce2..9538585ec3 100644
--- a/apps/backend/spec/compaction.py
+++ b/apps/backend/spec/compaction.py
@@ -16,7 +16,7 @@
async def summarize_phase_output(
phase_name: str,
phase_output: str,
- model: str = "claude-sonnet-4-5-20250929",
+ model: str = "sonnet", # Shorthand - resolved via API Profile if configured
target_words: int = 500,
) -> str:
"""
@@ -73,9 +73,12 @@ async def summarize_phase_output(
await client.query(prompt)
response_text = ""
async for msg in client.receive_response():
- if hasattr(msg, "content"):
+ msg_type = type(msg).__name__
+ if msg_type == "AssistantMessage" and hasattr(msg, "content"):
for block in msg.content:
- if hasattr(block, "text"):
+ # Must check block type - only TextBlock has .text attribute
+ block_type = type(block).__name__
+ if block_type == "TextBlock" and hasattr(block, "text"):
response_text += block.text
return response_text.strip()
except Exception as e:
diff --git a/apps/backend/spec/pipeline/orchestrator.py b/apps/backend/spec/pipeline/orchestrator.py
index 76c04d4719..3396f905bd 100644
--- a/apps/backend/spec/pipeline/orchestrator.py
+++ b/apps/backend/spec/pipeline/orchestrator.py
@@ -57,7 +57,7 @@ def __init__(
spec_name: str | None = None,
spec_dir: Path
| None = None, # Use existing spec directory (for UI integration)
- model: str = "claude-sonnet-4-5-20250929",
+ model: str = "sonnet", # Shorthand - resolved via API Profile if configured
thinking_level: str = "medium", # Thinking level for extended thinking
complexity_override: str | None = None, # Force a specific complexity
use_ai_assessment: bool = True, # Use AI for complexity assessment (vs heuristics)
@@ -173,10 +173,11 @@ async def _store_phase_summary(self, phase_name: str) -> None:
return
# Summarize the output
+ # Use sonnet shorthand - will resolve via API Profile if configured
summary = await summarize_phase_output(
phase_name,
phase_output,
- model="claude-sonnet-4-5-20250929", # Use Sonnet for efficiency
+ model="sonnet",
target_words=500,
)
diff --git a/apps/backend/task_logger/capture.py b/apps/backend/task_logger/capture.py
index 346011e20f..f96d893f49 100644
--- a/apps/backend/task_logger/capture.py
+++ b/apps/backend/task_logger/capture.py
@@ -88,17 +88,20 @@ def process_message(
inp = block.input
if isinstance(inp, dict):
# Extract meaningful input description
+ # Increased limits to avoid hiding critical information
if "pattern" in inp:
tool_input = f"pattern: {inp['pattern']}"
elif "file_path" in inp:
fp = inp["file_path"]
- if len(fp) > 50:
- fp = "..." + fp[-47:]
+ # Show last 200 chars for paths (enough for most file paths)
+ if len(fp) > 200:
+ fp = "..." + fp[-197:]
tool_input = fp
elif "command" in inp:
cmd = inp["command"]
- if len(cmd) > 50:
- cmd = cmd[:47] + "..."
+ # Show first 300 chars for commands (enough for most commands)
+ if len(cmd) > 300:
+ cmd = cmd[:297] + "..."
tool_input = cmd
elif "path" in inp:
tool_input = inp["path"]
diff --git a/apps/backend/task_logger/logger.py b/apps/backend/task_logger/logger.py
index 884bb90cea..954814464c 100644
--- a/apps/backend/task_logger/logger.py
+++ b/apps/backend/task_logger/logger.py
@@ -406,10 +406,10 @@ def tool_start(
"""
phase_key = (phase or self.current_phase or LogPhase.CODING).value
- # Truncate long inputs for display
+ # Truncate long inputs for display (increased limit to avoid hiding critical info)
display_input = tool_input
- if display_input and len(display_input) > 100:
- display_input = display_input[:97] + "..."
+ if display_input and len(display_input) > 300:
+ display_input = display_input[:297] + "..."
entry = LogEntry(
timestamp=self._timestamp(),
@@ -462,10 +462,10 @@ def tool_end(
"""
phase_key = (phase or self.current_phase or LogPhase.CODING).value
- # Truncate long results for display
+ # Truncate long results for display (increased limit to avoid hiding critical info)
display_result = result
- if display_result and len(display_result) > 100:
- display_result = display_result[:97] + "..."
+ if display_result and len(display_result) > 300:
+ display_result = display_result[:297] + "..."
status = "Done" if success else "Error"
content = f"[{tool_name}] {status}"
diff --git a/apps/backend/ui/boxes.py b/apps/backend/ui/boxes.py
index 317c4a913f..27921ed29f 100644
--- a/apps/backend/ui/boxes.py
+++ b/apps/backend/ui/boxes.py
@@ -95,11 +95,54 @@ def box(
for line in content:
# Strip ANSI for length calculation
visible_line = re.sub(r"\033\[[0-9;]*m", "", line)
- padding = inner_width - len(visible_line) - 2 # -2 for padding spaces
+ visible_len = len(visible_line)
+ padding = inner_width - visible_len - 2 # -2 for padding spaces
+
if padding < 0:
- # Truncate if too long
- line = line[: inner_width - 5] + "..."
- padding = 0
+ # Line is too long - need to truncate intelligently
+ # Calculate how much to remove (visible characters only)
+ chars_to_remove = abs(padding) + 3 # +3 for "..."
+ target_len = visible_len - chars_to_remove
+
+ if target_len <= 0:
+ # Line is way too long, just show "..."
+ line = "..."
+ padding = inner_width - 5 # 3 for "..." + 2 for padding
+ else:
+ # Truncate the visible text, preserving ANSI codes for what remains
+ # Split line into segments (ANSI code vs text)
+ segments = re.split(r"(\033\[[0-9;]*m)", line)
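+ # e.g. "\033[31mred\033[0m text" splits into
+ # ["", "\033[31m", "red", "\033[0m", " text"]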
+ visible_chars = 0
+ result_segments = []
+
+ for segment in segments:
+ if re.match(r"\033\[[0-9;]*m", segment):
+ # ANSI code - include it without counting
+ result_segments.append(segment)
+ else:
+ # Text segment - count visible characters
+ remaining_space = target_len - visible_chars
+ if remaining_space <= 0:
+ break
+ if len(segment) <= remaining_space:
+ result_segments.append(segment)
+ visible_chars += len(segment)
+ else:
+ # Truncate this segment at word boundary if possible
+ truncated = segment[:remaining_space]
+ # Try to truncate at last space to avoid mid-word cuts
+ last_space = truncated.rfind(" ")
+ if (
+ last_space > remaining_space * 0.7
+ ): # Only if space is in last 30%
+ truncated = truncated[:last_space]
+ result_segments.append(truncated)
+ visible_chars += len(truncated)
+ break
+
+ line = "".join(result_segments) + "..."
+ padding = 0
+
lines.append(v + " " + line + " " * (padding + 1) + v)
# Bottom border
diff --git a/apps/backend/ui/capabilities.py b/apps/backend/ui/capabilities.py
index ac8de510d0..26390abbf5 100644
--- a/apps/backend/ui/capabilities.py
+++ b/apps/backend/ui/capabilities.py
@@ -13,6 +13,61 @@
import sys
+def enable_windows_ansi_support() -> bool:
+ """
+ Enable ANSI escape sequence support on Windows.
+
+ Windows 10 (build 10586+) supports ANSI escape sequences natively,
+ but they must be explicitly enabled via the Windows API.
+
+ Returns:
+ True if ANSI support was enabled, False otherwise
+ """
+ if sys.platform != "win32":
+ return True # Non-Windows always has ANSI support
+
+ try:
+ import ctypes
+ from ctypes import wintypes
+
+ # Windows constants
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+
+ kernel32 = ctypes.windll.kernel32
+
+ # Get handles
+ for handle_id in (STD_OUTPUT_HANDLE, STD_ERROR_HANDLE):
+ handle = kernel32.GetStdHandle(handle_id)
+ if handle == -1:
+ continue
+
+ # Get current console mode
+ mode = wintypes.DWORD()
+ if not kernel32.GetConsoleMode(handle, ctypes.byref(mode)):
+ continue
+
+ # Enable ANSI support if not already enabled
+ if not (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING):
+ kernel32.SetConsoleMode(
+ handle, mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ )
+
+ return True
+ except (ImportError, AttributeError, OSError):
+ # Fall back to colorama if available
+ try:
+ import colorama
+
+ colorama.init()
+ return True
+ except ImportError:
+ pass
+
+ return False
+
+
def configure_safe_encoding() -> None:
"""
Configure stdout/stderr to handle Unicode safely on Windows.
@@ -54,8 +109,9 @@ def configure_safe_encoding() -> None:
pass
-# Configure safe encoding on module import
+# Configure safe encoding and ANSI support on module import
configure_safe_encoding()
+WINDOWS_ANSI_ENABLED = enable_windows_ansi_support()
def _is_fancy_ui_enabled() -> bool:
diff --git a/apps/backend/ui/icons.py b/apps/backend/ui/icons.py
index 2f27496162..13675eb369 100644
--- a/apps/backend/ui/icons.py
+++ b/apps/backend/ui/icons.py
@@ -39,9 +39,10 @@ class Icons:
FILE = ("📄", "[F]")
GEAR = ("⚙", "[*]")
SEARCH = ("🔍", "[?]")
- BRANCH = ("", "[B]")
+ BRANCH = ("🌿", "[BR]") # [BR] to avoid collision with BLOCKED [B]
COMMIT = ("◉", "(@)")
LIGHTNING = ("⚡", "!")
+ LINK = ("🔗", "[L]") # For PR URLs
# Progress
SUBTASK = ("▣", "#")
diff --git a/apps/frontend/.env.example b/apps/frontend/.env.example
index f01b56f27a..d5d246749d 100644
--- a/apps/frontend/.env.example
+++ b/apps/frontend/.env.example
@@ -19,6 +19,34 @@
# Shows detailed information about app update checks and downloads
# DEBUG_UPDATER=true
+# ============================================
+# SENTRY ERROR REPORTING
+# ============================================
+
+# Sentry DSN for anonymous error reporting
+# If not set, error reporting is completely disabled (safe for forks)
+#
+# For official builds: Set in CI/CD secrets
+# For local testing: Uncomment and add your DSN
+#
+# SENTRY_DSN=https://your-dsn@sentry.io/project-id
+
+# Force enable Sentry in development mode (normally disabled in dev)
+# Only works when SENTRY_DSN is also set
+# SENTRY_DEV=true
+
+# Trace sample rate for performance monitoring (0.0 to 1.0)
+# Controls what percentage of transactions are sampled
+# Default: 0.1 (10%) in production, 0 in development
+# Set to 0 to disable performance monitoring entirely
+# SENTRY_TRACES_SAMPLE_RATE=0.1
+
+# Profile sample rate for profiling (0.0 to 1.0)
+# Controls what percentage of sampled transactions include profiling data
+# Default: 0.1 (10%) in production, 0 in development
+# Set to 0 to disable profiling entirely
+# SENTRY_PROFILES_SAMPLE_RATE=0.1
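+
+# Example: keep 10% of traces but disable profiling entirely
+# SENTRY_TRACES_SAMPLE_RATE=0.1
+# SENTRY_PROFILES_SAMPLE_RATE=0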
+
# ============================================
# HOW TO USE
# ============================================
diff --git a/apps/frontend/e2e/task-workflow.spec.ts b/apps/frontend/e2e/task-workflow.spec.ts
new file mode 100644
index 0000000000..19cebabebc
--- /dev/null
+++ b/apps/frontend/e2e/task-workflow.spec.ts
@@ -0,0 +1,341 @@
+/**
+ * End-to-End tests for full task workflow
+ * Tests: create → spec → subtasks → resume
+ *
+ * NOTE: These tests require the Electron app to be built first.
+ * Run `npm run build` before running E2E tests.
+ *
+ * To run: npx playwright test task-workflow --config=e2e/playwright.config.ts
+ */
+import { test, expect } from '@playwright/test';
+import { mkdirSync, mkdtempSync, rmSync, existsSync, writeFileSync, readFileSync } from 'fs';
+import { tmpdir } from 'os';
+import path from 'path';
+
+// Test data directory - created securely with mkdtempSync to prevent TOCTOU attacks
+let TEST_DATA_DIR: string;
+let TEST_PROJECT_DIR: string;
+let SPECS_DIR: string;
+
+// Setup test environment with secure temp directory
+function setupTestEnvironment(): void {
+ // Create secure temp directory with random suffix
+ TEST_DATA_DIR = mkdtempSync(path.join(tmpdir(), 'auto-claude-task-workflow-e2e-'));
+ TEST_PROJECT_DIR = path.join(TEST_DATA_DIR, 'test-project');
+ SPECS_DIR = path.join(TEST_PROJECT_DIR, '.auto-claude', 'specs');
+ mkdirSync(TEST_PROJECT_DIR, { recursive: true });
+ mkdirSync(SPECS_DIR, { recursive: true });
+}
+
+// Cleanup test environment
+function cleanupTestEnvironment(): void {
+ if (existsSync(TEST_DATA_DIR)) {
+ rmSync(TEST_DATA_DIR, { recursive: true, force: true });
+ }
+}
+
+// Helper to create a task spec with subtasks
+function createTaskWithSubtasks(
+ specId: string,
+ subtaskStatuses: Array<'pending' | 'in_progress' | 'completed'>
+): void {
+ const specDir = path.join(SPECS_DIR, specId);
+ mkdirSync(specDir, { recursive: true });
+
+ // Create spec.md
+ writeFileSync(
+ path.join(specDir, 'spec.md'),
+ `# ${specId}\n\n## Overview\n\nTest task for workflow validation.\n\n## Acceptance Criteria\n\n- [ ] All subtasks completed\n- [ ] Tests pass\n`
+ );
+
+ // Create requirements.json
+ writeFileSync(
+ path.join(specDir, 'requirements.json'),
+ JSON.stringify(
+ {
+ task_description: `Test task ${specId}`,
+ user_requirements: ['Requirement 1', 'Requirement 2'],
+ acceptance_criteria: ['All subtasks completed', 'Tests pass'],
+ context: []
+ },
+ null,
+ 2
+ )
+ );
+
+ // Create implementation_plan.json with subtasks
+ const subtasks = subtaskStatuses.map((status, index) => ({
+ id: `subtask-${index + 1}`,
+ phase: 'Implementation',
+ service: 'backend',
+ description: `Subtask ${index + 1}: Implement feature part ${index + 1}`,
+ files_to_modify: [`src/file${index + 1}.py`],
+ files_to_create: [],
+ pattern_files: [],
+ verification_command: 'pytest tests/',
+ status: status,
+ notes: status === 'completed' ? 'Completed successfully' : ''
+ }));
+
+ writeFileSync(
+ path.join(specDir, 'implementation_plan.json'),
+ JSON.stringify(
+ {
+ feature: `Test Feature ${specId}`,
+ workflow_type: 'feature',
+ services_involved: ['backend'],
+ subtasks: subtasks,
+ final_acceptance: ['All subtasks completed', 'Tests pass'],
+ created_at: new Date().toISOString(),
+ updated_at: new Date().toISOString(),
+ spec_file: 'spec.md'
+ },
+ null,
+ 2
+ )
+ );
+
+ // Create build-progress.txt
+ writeFileSync(
+ path.join(specDir, 'build-progress.txt'),
+ `Task Progress: ${specId}\n\nSubtasks: ${subtasks.length}\nCompleted: ${subtasks.filter(s => s.status === 'completed').length}\n`
+ );
+}
+
+// Helper to simulate task resumption
+function simulateTaskResume(specId: string): void {
+ const planPath = path.join(SPECS_DIR, specId, 'implementation_plan.json');
+ const plan = JSON.parse(readFileSync(planPath, 'utf-8'));
+
+ // Find first pending subtask and mark as in_progress
+ const pendingSubtask = plan.subtasks.find((st: { status: string }) => st.status === 'pending');
+ if (pendingSubtask) {
+ pendingSubtask.status = 'in_progress';
+ pendingSubtask.notes = 'Resumed from checkpoint';
+ }
+
+ plan.updated_at = new Date().toISOString();
+ writeFileSync(planPath, JSON.stringify(plan, null, 2));
+}
+
+test.describe('Task Workflow E2E Tests', () => {
+ test.beforeAll(() => {
+ setupTestEnvironment();
+ });
+
+ test.afterAll(() => {
+ cleanupTestEnvironment();
+ });
+
+ test('should create task directory structure', () => {
+ const specId = '001-test-task';
+ const specDir = path.join(SPECS_DIR, specId);
+ mkdirSync(specDir, { recursive: true });
+
+ // Verify directory created
+ expect(existsSync(specDir)).toBe(true);
+ });
+
+ test('should generate spec.md file', () => {
+ const specId = '002-task-with-spec';
+ const specDir = path.join(SPECS_DIR, specId);
+ mkdirSync(specDir, { recursive: true });
+
+ // Write spec
+ const specContent = '# Test Task\n\n## Overview\n\nThis is a test task.\n';
+ writeFileSync(path.join(specDir, 'spec.md'), specContent);
+
+ // Verify spec file
+ expect(existsSync(path.join(specDir, 'spec.md'))).toBe(true);
+ const content = readFileSync(path.join(specDir, 'spec.md'), 'utf-8');
+ expect(content).toContain('Test Task');
+ });
+
+ test('should create implementation plan with subtasks', () => {
+ const specId = '003-task-with-subtasks';
+ createTaskWithSubtasks(specId, ['pending', 'pending', 'pending']);
+
+ const planPath = path.join(SPECS_DIR, specId, 'implementation_plan.json');
+ expect(existsSync(planPath)).toBe(true);
+
+ const plan = JSON.parse(readFileSync(planPath, 'utf-8'));
+ expect(plan.subtasks).toBeDefined();
+ expect(plan.subtasks.length).toBe(3);
+ expect(plan.subtasks[0].status).toBe('pending');
+ });
+
+ test('should track subtask progress', () => {
+ const specId = '004-task-in-progress';
+ createTaskWithSubtasks(specId, ['completed', 'in_progress', 'pending']);
+
+ const planPath = path.join(SPECS_DIR, specId, 'implementation_plan.json');
+ const plan = JSON.parse(readFileSync(planPath, 'utf-8'));
+
+ expect(plan.subtasks[0].status).toBe('completed');
+ expect(plan.subtasks[1].status).toBe('in_progress');
+ expect(plan.subtasks[2].status).toBe('pending');
+ });
+
+ test('should resume task from checkpoint', () => {
+ const specId = '005-task-resume';
+ createTaskWithSubtasks(specId, ['completed', 'pending', 'pending']);
+
+ // Verify initial state
+ let plan = JSON.parse(readFileSync(path.join(SPECS_DIR, specId, 'implementation_plan.json'), 'utf-8'));
+ expect(plan.subtasks[1].status).toBe('pending');
+
+ // Simulate resume
+ simulateTaskResume(specId);
+
+ // Verify resumed state
+ plan = JSON.parse(readFileSync(path.join(SPECS_DIR, specId, 'implementation_plan.json'), 'utf-8'));
+ expect(plan.subtasks[1].status).toBe('in_progress');
+ expect(plan.subtasks[1].notes).toContain('Resumed from checkpoint');
+ });
+
+ test('should complete all subtasks in sequence', () => {
+ const specId = '006-task-completion';
+ createTaskWithSubtasks(specId, ['completed', 'completed', 'completed']);
+
+ const plan = JSON.parse(readFileSync(path.join(SPECS_DIR, specId, 'implementation_plan.json'), 'utf-8'));
+ const allCompleted = plan.subtasks.every((st: { status: string }) => st.status === 'completed');
+
+ expect(allCompleted).toBe(true);
+ });
+
+ test('should maintain build progress log', () => {
+ const specId = '007-task-with-progress';
+ createTaskWithSubtasks(specId, ['completed', 'in_progress', 'pending']);
+
+ const progressPath = path.join(SPECS_DIR, specId, 'build-progress.txt');
+ expect(existsSync(progressPath)).toBe(true);
+
+ const progressContent = readFileSync(progressPath, 'utf-8');
+ expect(progressContent).toContain('Task Progress');
+ expect(progressContent).toContain('Subtasks: 3');
+ });
+});
+
+test.describe('Full Task Workflow Integration', () => {
+ test.beforeAll(() => {
+ setupTestEnvironment();
+ });
+
+ test.afterAll(() => {
+ cleanupTestEnvironment();
+ });
+
+ test('should complete full workflow: create → spec → subtasks → resume → complete', () => {
+ const specId = '100-full-workflow';
+
+ // Step 1: Create task
+ const specDir = path.join(SPECS_DIR, specId);
+ mkdirSync(specDir, { recursive: true });
+ expect(existsSync(specDir)).toBe(true);
+
+ // Step 2: Generate spec
+ writeFileSync(
+ path.join(specDir, 'spec.md'),
+ '# Full Workflow Test\n\n## Overview\n\nComplete workflow test.\n'
+ );
+ expect(existsSync(path.join(specDir, 'spec.md'))).toBe(true);
+
+ // Step 3: Create subtasks
+ createTaskWithSubtasks(specId, ['pending', 'pending', 'pending']);
+ let plan = JSON.parse(readFileSync(path.join(specDir, 'implementation_plan.json'), 'utf-8'));
+ expect(plan.subtasks.length).toBe(3);
+
+ // Step 4: Start first subtask
+ plan.subtasks[0].status = 'in_progress';
+ writeFileSync(path.join(specDir, 'implementation_plan.json'), JSON.stringify(plan, null, 2));
+
+ plan = JSON.parse(readFileSync(path.join(specDir, 'implementation_plan.json'), 'utf-8'));
+ expect(plan.subtasks[0].status).toBe('in_progress');
+
+ // Step 5: Complete first subtask
+ plan.subtasks[0].status = 'completed';
+ plan.subtasks[0].notes = 'First subtask completed';
+ writeFileSync(path.join(specDir, 'implementation_plan.json'), JSON.stringify(plan, null, 2));
+
+ // Step 6: Resume with second subtask
+ simulateTaskResume(specId);
+ plan = JSON.parse(readFileSync(path.join(specDir, 'implementation_plan.json'), 'utf-8'));
+ expect(plan.subtasks[1].status).toBe('in_progress');
+
+ // Step 7: Complete remaining subtasks
+ plan.subtasks[1].status = 'completed';
+ plan.subtasks[2].status = 'completed';
+ writeFileSync(path.join(specDir, 'implementation_plan.json'), JSON.stringify(plan, null, 2));
+
+ // Step 8: Verify all completed
+ plan = JSON.parse(readFileSync(path.join(specDir, 'implementation_plan.json'), 'utf-8'));
+ const allCompleted = plan.subtasks.every((st: { status: string }) => st.status === 'completed');
+ expect(allCompleted).toBe(true);
+
+ // Step 9: Verify final state
+ expect(plan.subtasks[0].notes).toContain('First subtask completed');
+ expect(plan.subtasks[1].notes).toContain('Resumed from checkpoint');
+ });
+
+ test('should handle workflow interruption and recovery', () => {
+ const specId = '101-workflow-recovery';
+
+ // Create task with partial progress
+ createTaskWithSubtasks(specId, ['completed', 'in_progress', 'pending']);
+
+ // Simulate interruption (task status is saved)
+ const planPath = path.join(SPECS_DIR, specId, 'implementation_plan.json');
+ let plan = JSON.parse(readFileSync(planPath, 'utf-8'));
+ expect(plan.subtasks[1].status).toBe('in_progress');
+
+ // Simulate recovery: complete interrupted subtask
+ plan.subtasks[1].status = 'completed';
+ plan.subtasks[1].notes = 'Recovered and completed';
+ writeFileSync(planPath, JSON.stringify(plan, null, 2));
+
+ // Resume with next subtask
+ simulateTaskResume(specId);
+ plan = JSON.parse(readFileSync(planPath, 'utf-8'));
+
+ // Verify recovery successful
+ expect(plan.subtasks[1].status).toBe('completed');
+ expect(plan.subtasks[2].status).toBe('in_progress');
+ });
+
+ test('should validate workflow data integrity', () => {
+ const specId = '102-data-integrity';
+ createTaskWithSubtasks(specId, ['pending', 'pending', 'pending']);
+
+ const specDir = path.join(SPECS_DIR, specId);
+
+ // Verify all required files exist
+ expect(existsSync(path.join(specDir, 'spec.md'))).toBe(true);
+ expect(existsSync(path.join(specDir, 'requirements.json'))).toBe(true);
+ expect(existsSync(path.join(specDir, 'implementation_plan.json'))).toBe(true);
+ expect(existsSync(path.join(specDir, 'build-progress.txt'))).toBe(true);
+
+ // Verify data structure integrity
+ const requirements = JSON.parse(readFileSync(path.join(specDir, 'requirements.json'), 'utf-8'));
+ expect(requirements.task_description).toBeDefined();
+ expect(requirements.acceptance_criteria).toBeDefined();
+
+ const plan = JSON.parse(readFileSync(path.join(specDir, 'implementation_plan.json'), 'utf-8'));
+ expect(plan.feature).toBeDefined();
+ expect(plan.subtasks).toBeDefined();
+ expect(plan.created_at).toBeDefined();
+ expect(plan.updated_at).toBeDefined();
+
+ // Verify subtask structure
+ plan.subtasks.forEach((subtask: {
+ id: string;
+ description: string;
+ status: string;
+ verification_command: string;
+ }) => {
+ expect(subtask.id).toBeDefined();
+ expect(subtask.description).toBeDefined();
+ expect(subtask.status).toMatch(/^(pending|in_progress|completed)$/);
+ expect(subtask.verification_command).toBeDefined();
+ });
+ });
+});
diff --git a/apps/frontend/e2e/terminal-copy-paste.e2e.ts b/apps/frontend/e2e/terminal-copy-paste.e2e.ts
new file mode 100644
index 0000000000..8902600ee1
--- /dev/null
+++ b/apps/frontend/e2e/terminal-copy-paste.e2e.ts
@@ -0,0 +1,335 @@
+/**
+ * End-to-End tests for terminal copy/paste functionality
+ * Tests copy/paste keyboard shortcuts in the Electron app
+ *
+ * These tests require the Electron app to be built first.
+ * Run `npm run build` before running E2E tests.
+ *
+ * To run: npx playwright test terminal-copy-paste.e2e.ts --config=e2e/playwright.config.ts
+ */
+import { test, expect, _electron as electron, ElectronApplication, Page } from '@playwright/test';
+import { mkdirSync, rmSync, existsSync } from 'fs';
+import path from 'path';
+import * as os from 'os';
+
+// Global Navigator declaration for clipboard
+declare global {
+ interface Navigator {
+ clipboard: {
+ readText(): Promise<string>;
+ writeText(text: string): Promise<void>;
+ };
+ }
+}
+
+// Test data directory
+const TEST_DATA_DIR = path.join(os.tmpdir(), 'auto-claude-terminal-e2e');
+
+// Determine platform for platform-specific tests
+const platform = process.platform;
+const isMac = platform === 'darwin';
+const isWindows = platform === 'win32';
+const isLinux = platform === 'linux';
+
+// Setup test environment
+function setupTestEnvironment(): void {
+ if (existsSync(TEST_DATA_DIR)) {
+ rmSync(TEST_DATA_DIR, { recursive: true, force: true });
+ }
+ mkdirSync(TEST_DATA_DIR, { recursive: true });
+}
+
+// Cleanup test environment
+function cleanupTestEnvironment(): void {
+ if (existsSync(TEST_DATA_DIR)) {
+ rmSync(TEST_DATA_DIR, { recursive: true, force: true });
+ }
+}
+
+// Helper to get platform-specific copy shortcut
+function getCopyShortcutKey(): string {
+ return isMac ? 'Meta' : 'Control';
+}
+
+// Helper to check if test should run on current platform
+function shouldRunForPlatform(testPlatform: 'all' | 'windows' | 'linux' | 'mac'): boolean {
+ if (testPlatform === 'all') return true;
+ if (testPlatform === 'windows') return isWindows;
+ if (testPlatform === 'linux') return isLinux;
+ if (testPlatform === 'mac') return isMac;
+ return false;
+}
+
+test.describe('Terminal Copy/Paste Flows', () => {
+ let app: ElectronApplication;
+ let window: Page;
+ let isAppReady = false;
+
+ test.beforeAll(async () => {
+ setupTestEnvironment();
+ });
+
+ test.afterAll(async () => {
+ cleanupTestEnvironment();
+ });
+
+ test.beforeEach(async () => {
+ // Launch Electron app
+ const appPath = path.join(__dirname, '..');
+ app = await electron.launch({ args: [appPath] });
+
+ window = await app.firstWindow({
+ timeout: 15000
+ });
+
+ // Wait for app to be ready
+ try {
+ await window.waitForSelector('body', { timeout: 10000 });
+ isAppReady = true;
+ } catch (error) {
+ console.error('App failed to load:', error);
+ isAppReady = false;
+ }
+ });
+
+ test.afterEach(async () => {
+ if (app) {
+ await app.close();
+ }
+ });
+
+ test.describe.configure({ mode: 'serial' });
+
+ test('should copy selected text to clipboard', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('all'), 'Test not applicable to this platform');
+
+ // Look for terminal element - skip if not found
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ // Run a command to produce output
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Type echo command and press enter
+ await window.keyboard.type('echo "test output for copy"');
+ await window.keyboard.press('Enter');
+
+ // Wait for output to appear in terminal
+ await expect(terminal).toContainText('test output for copy', { timeout: 5000 });
+
+ // Select text (triple click to select line)
+ await terminal.click({ clickCount: 3 });
+
+ // Wait for selection to be active
+ await window.waitForTimeout(100);
+
+ // Press copy shortcut (Cmd+C on Mac, Ctrl+C on Windows/Linux)
+ const copyKey = getCopyShortcutKey();
+ await window.keyboard.press(`${copyKey}+c`);
+
+ // Wait briefly for clipboard operation
+ await window.waitForTimeout(100);
+
+ // Verify clipboard contains selected text
+ const clipboardText = await window.evaluate(async () => {
+ return await navigator.clipboard.readText();
+ });
+
+ expect(clipboardText).toContain('test output for copy');
+ });
+
+ test('should send interrupt signal when no text selected', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('all'), 'Test not applicable to this platform');
+
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Start a long-running process (sleep on Linux/Mac, timeout on Windows)
+ const sleepCommand = isWindows ? 'timeout 10' : 'sleep 10';
+ await window.keyboard.type(sleepCommand);
+ await window.keyboard.press('Enter');
+
+ // Wait for process to start
+ await window.waitForTimeout(500);
+
+ // Press Ctrl+C without selection (should send interrupt)
+ await window.keyboard.press('Control+c');
+
+ // Wait for interrupt to be processed - look for ^C or new prompt
+ await expect(terminal).toContainText(/\^C|[$#>]/, { timeout: 3000 });
+ });
+
+ test('should paste clipboard text into terminal', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('all'), 'Test not applicable to this platform');
+
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ // Set clipboard content
+ const testText = 'hello world from clipboard';
+ await window.evaluate(async (text) => {
+ await navigator.clipboard.writeText(text);
+ }, testText);
+
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Press paste shortcut
+ const pasteKey = isMac ? 'Meta' : 'Control';
+ await window.keyboard.press(`${pasteKey}+v`);
+
+ // Wait briefly for paste to complete
+ await window.waitForTimeout(100);
+
+ // Press Enter to execute the pasted command
+ await window.keyboard.press('Enter');
+
+ // Verify text was pasted (terminal should show the pasted text or output)
+ await expect(terminal).toContainText(testText, { timeout: 5000 });
+ });
+
+ test('should handle Linux CTRL+SHIFT+C copy shortcut', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('linux'), 'Linux-specific test');
+
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Type command to generate output
+ await window.keyboard.type('echo "linux copy test"');
+ await window.keyboard.press('Enter');
+
+ // Wait for output
+ await expect(terminal).toContainText('linux copy test', { timeout: 5000 });
+
+ // Select text
+ await terminal.click({ clickCount: 3 });
+ await window.waitForTimeout(100);
+
+ // Press CTRL+SHIFT+C (Linux copy shortcut)
+ await window.keyboard.down('Control');
+ await window.keyboard.down('Shift');
+ await window.keyboard.press('c');
+ await window.keyboard.up('Shift');
+ await window.keyboard.up('Control');
+
+ // Wait briefly for clipboard operation
+ await window.waitForTimeout(100);
+
+ // Verify clipboard contains selected text
+ const clipboardText = await window.evaluate(async () => {
+ return await navigator.clipboard.readText();
+ });
+
+ expect(clipboardText).toContain('linux copy test');
+ });
+
+ test('should handle Linux CTRL+SHIFT+V paste shortcut', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('linux'), 'Linux-specific test');
+
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ // Set clipboard content
+ const testText = 'pasted via ctrl+shift+v';
+ await window.evaluate(async (text) => {
+ await navigator.clipboard.writeText(text);
+ }, testText);
+
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Press CTRL+SHIFT+V (Linux paste shortcut)
+ await window.keyboard.down('Control');
+ await window.keyboard.down('Shift');
+ await window.keyboard.press('v');
+ await window.keyboard.up('Shift');
+ await window.keyboard.up('Control');
+
+ // Wait briefly for paste to complete
+ await window.waitForTimeout(100);
+
+ // Press Enter to execute
+ await window.keyboard.press('Enter');
+
+ // Verify text was pasted
+ await expect(terminal).toContainText(testText, { timeout: 5000 });
+ });
+
+ test('should verify existing shortcuts still work', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('all'), 'Test not applicable to this platform');
+
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Test SHIFT+Enter (multi-line input)
+ await window.keyboard.type('echo "line 1"');
+ await window.keyboard.down('Shift');
+ await window.keyboard.press('Enter');
+ await window.keyboard.up('Shift');
+ await window.keyboard.type('echo "line 2"');
+ await window.keyboard.press('Enter');
+
+ // Verify multi-line input worked (both commands should execute)
+ await expect(terminal).toContainText('line 1', { timeout: 5000 });
+ await expect(terminal).toContainText('line 2', { timeout: 5000 });
+ });
+
+ test('should handle clipboard errors gracefully', async () => {
+ test.skip(!isAppReady, 'App not ready');
+ test.skip(!shouldRunForPlatform('all'), 'Test not applicable to this platform');
+
+ const terminalSelector = '.xterm';
+ const terminalExists = await window.locator(terminalSelector).count() > 0;
+ test.skip(!terminalExists, 'Terminal element not found');
+
+ // Mock clipboard permission denial by clearing clipboard
+ await window.evaluate(async () => {
+ // Try to read clipboard (may fail if permission denied)
+ try {
+ await navigator.clipboard.readText();
+ } catch (_error) {
+ // Expected - clipboard may not be accessible in test environment
+ console.warn('Clipboard not accessible (expected in some environments)');
+ }
+ });
+
+ const terminal = window.locator(terminalSelector).first();
+ await terminal.click();
+
+ // Try to paste even if clipboard is not accessible
+ const pasteKey = isMac ? 'Meta' : 'Control';
+ await window.keyboard.press(`${pasteKey}+v`);
+
+ // Wait briefly to ensure terminal remains stable
+ await window.waitForTimeout(100);
+
+ // Try typing to verify terminal still works
+ await window.keyboard.type('echo "terminal still works"');
+ await window.keyboard.press('Enter');
+
+ // Verify terminal still functions after clipboard error
+ await expect(terminal).toContainText('terminal still works', { timeout: 5000 });
+ });
+});
diff --git a/apps/frontend/package-lock.json b/apps/frontend/package-lock.json
index 9abc6c3090..e81abc2d9b 100644
--- a/apps/frontend/package-lock.json
+++ b/apps/frontend/package-lock.json
@@ -32,38 +32,38 @@
"@radix-ui/react-tooltip": "^1.2.8",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/react-virtual": "^3.13.13",
- "@xterm/addon-fit": "^0.11.0",
- "@xterm/addon-serialize": "^0.14.0",
- "@xterm/addon-web-links": "^0.12.0",
- "@xterm/addon-webgl": "^0.19.0",
- "@xterm/xterm": "^6.0.0",
+ "@xterm/addon-fit": "^0.10.0",
+ "@xterm/addon-serialize": "^0.13.0",
+ "@xterm/addon-web-links": "^0.11.0",
+ "@xterm/addon-webgl": "^0.18.0",
+ "@xterm/xterm": "^5.5.0",
"chokidar": "^5.0.0",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"electron-log": "^5.4.3",
"electron-updater": "^6.6.2",
"i18next": "^25.7.3",
- "lucide-react": "^0.562.0",
+ "lucide-react": "^0.560.0",
"motion": "^12.23.26",
"react": "^19.2.3",
"react-dom": "^19.2.3",
"react-i18next": "^16.5.0",
"react-markdown": "^10.1.0",
- "react-resizable-panels": "^4.2.0",
+ "react-resizable-panels": "^3.0.6",
"remark-gfm": "^4.0.1",
"semver": "^7.7.3",
"tailwind-merge": "^3.4.0",
"uuid": "^13.0.0",
- "zod": "^4.2.1",
"zustand": "^5.0.9"
},
"devDependencies": {
"@electron-toolkit/preload": "^3.0.2",
"@electron-toolkit/utils": "^4.0.0",
- "@electron/rebuild": "^4.0.2",
+ "@electron/rebuild": "^3.7.1",
"@eslint/js": "^9.39.1",
"@playwright/test": "^1.52.0",
"@tailwindcss/postcss": "^4.1.17",
+ "@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.1.0",
"@types/node": "^25.0.0",
"@types/react": "^19.2.7",
@@ -72,33 +72,32 @@
"@types/uuid": "^10.0.0",
"@vitejs/plugin-react": "^5.1.2",
"autoprefixer": "^10.4.22",
- "cross-env": "^10.1.0",
"electron": "^39.2.7",
"electron-builder": "^26.0.12",
"electron-vite": "^5.0.0",
"eslint": "^9.39.1",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-react-hooks": "^7.0.1",
- "globals": "^17.0.0",
+ "globals": "^16.5.0",
"husky": "^9.1.7",
- "jsdom": "^27.3.0",
+ "jsdom": "^26.0.0",
"lint-staged": "^16.2.7",
"postcss": "^8.5.6",
"tailwindcss": "^4.1.17",
"typescript": "^5.9.3",
- "typescript-eslint": "^8.50.1",
+ "typescript-eslint": "^8.49.0",
"vite": "^7.2.7",
- "vitest": "^4.0.16"
+ "vitest": "^4.0.15"
},
"engines": {
"node": ">=24.0.0",
"npm": ">=10.0.0"
}
},
- "node_modules/@acemir/cssom": {
- "version": "0.9.30",
- "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.30.tgz",
- "integrity": "sha512-9CnlMCI0LmCIq0olalQqdWrJHPzm0/tw3gzOA9zJSgvFX7Xau3D24mAGa4BtwxwY69nsuJW6kQqqCzf/mEcQgg==",
+ "node_modules/@adobe/css-tools": {
+ "version": "4.4.4",
+ "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz",
+ "integrity": "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==",
"dev": true,
"license": "MIT"
},
@@ -116,59 +115,25 @@
}
},
"node_modules/@asamuzakjp/css-color": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.1.tgz",
- "integrity": "sha512-B0Hv6G3gWGMn0xKJ0txEi/jM5iFpT3MfDxmhZFb4W047GvytCf1DHQ1D69W3zHI4yWe2aTZAA0JnbMZ7Xc8DuQ==",
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz",
+ "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@csstools/css-calc": "^2.1.4",
- "@csstools/css-color-parser": "^3.1.0",
- "@csstools/css-parser-algorithms": "^3.0.5",
- "@csstools/css-tokenizer": "^3.0.4",
- "lru-cache": "^11.2.4"
+ "@csstools/css-calc": "^2.1.3",
+ "@csstools/css-color-parser": "^3.0.9",
+ "@csstools/css-parser-algorithms": "^3.0.4",
+ "@csstools/css-tokenizer": "^3.0.3",
+ "lru-cache": "^10.4.3"
}
},
"node_modules/@asamuzakjp/css-color/node_modules/lru-cache": {
- "version": "11.2.4",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz",
- "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": "20 || >=22"
- }
- },
- "node_modules/@asamuzakjp/dom-selector": {
- "version": "6.7.6",
- "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.6.tgz",
- "integrity": "sha512-hBaJER6A9MpdG3WgdlOolHmbOYvSk46y7IQN/1+iqiCuUu6iWdQrs9DGKF8ocqsEqWujWf/V7b7vaDgiUmIvUg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@asamuzakjp/nwsapi": "^2.3.9",
- "bidi-js": "^1.0.3",
- "css-tree": "^3.1.0",
- "is-potential-custom-element-name": "^1.0.1",
- "lru-cache": "^11.2.4"
- }
- },
- "node_modules/@asamuzakjp/dom-selector/node_modules/lru-cache": {
- "version": "11.2.4",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz",
- "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": "20 || >=22"
- }
- },
- "node_modules/@asamuzakjp/nwsapi": {
- "version": "2.3.9",
- "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz",
- "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==",
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
+ "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
"dev": true,
- "license": "MIT"
+ "license": "ISC"
},
"node_modules/@babel/code-frame": {
"version": "7.27.1",
@@ -592,26 +557,6 @@
"@csstools/css-tokenizer": "^3.0.4"
}
},
- "node_modules/@csstools/css-syntax-patches-for-csstree": {
- "version": "1.0.22",
- "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.22.tgz",
- "integrity": "sha512-qBcx6zYlhleiFfdtzkRgwNC7VVoAwfK76Vmsw5t+PbvtdknO9StgRk7ROvq9so1iqbdW4uLIDAsXRsTfUrIoOw==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/csstools"
- },
- {
- "type": "opencollective",
- "url": "https://opencollective.com/csstools"
- }
- ],
- "license": "MIT-0",
- "engines": {
- "node": ">=18"
- }
- },
"node_modules/@csstools/css-tokenizer": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz",
@@ -741,6 +686,28 @@
"node": ">=10.12.0"
}
},
+ "node_modules/@electron/asar/node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
"node_modules/@electron/asar/node_modules/minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
@@ -785,29 +752,6 @@
"node": ">=10"
}
},
- "node_modules/@electron/fuses/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/@electron/fuses/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
"node_modules/@electron/get": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@electron/get/-/get-2.0.3.tgz",
@@ -830,6 +774,31 @@
"global-agent": "^3.0.0"
}
},
+ "node_modules/@electron/get/node_modules/fs-extra": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
+ "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^4.0.0",
+ "universalify": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=6 <7 || >=8"
+ }
+ },
+ "node_modules/@electron/get/node_modules/jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==",
+ "dev": true,
+ "license": "MIT",
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
"node_modules/@electron/get/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
@@ -840,6 +809,16 @@
"semver": "bin/semver.js"
}
},
+ "node_modules/@electron/get/node_modules/universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
"node_modules/@electron/node-gyp": {
"version": "10.2.0-electron.1",
"resolved": "git+ssh://git@github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2",
@@ -865,581 +844,99 @@
"node": ">=12.13.0"
}
},
- "node_modules/@electron/node-gyp/node_modules/@npmcli/fs": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz",
- "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "@gar/promisify": "^1.1.3",
- "semver": "^7.3.5"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/abbrev": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
- "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/@electron/node-gyp/node_modules/agent-base": {
- "version": "6.0.2",
- "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
- "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+ "node_modules/@electron/notarize": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz",
+ "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==",
"dev": true,
"license": "MIT",
"dependencies": {
- "debug": "4"
+ "debug": "^4.1.1",
+ "fs-extra": "^9.0.1",
+ "promise-retry": "^2.0.1"
},
"engines": {
- "node": ">= 6.0.0"
+ "node": ">= 10.0.0"
}
},
- "node_modules/@electron/node-gyp/node_modules/brace-expansion": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
- "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
+ "node_modules/@electron/notarize/node_modules/fs-extra": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
+ "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/cacache": {
- "version": "16.1.3",
- "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz",
- "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "@npmcli/fs": "^2.1.0",
- "@npmcli/move-file": "^2.0.0",
- "chownr": "^2.0.0",
- "fs-minipass": "^2.1.0",
- "glob": "^8.0.1",
- "infer-owner": "^1.0.4",
- "lru-cache": "^7.7.1",
- "minipass": "^3.1.6",
- "minipass-collect": "^1.0.2",
- "minipass-flush": "^1.0.5",
- "minipass-pipeline": "^1.2.4",
- "mkdirp": "^1.0.4",
- "p-map": "^4.0.0",
- "promise-inflight": "^1.0.1",
- "rimraf": "^3.0.2",
- "ssri": "^9.0.0",
- "tar": "^6.1.11",
- "unique-filename": "^2.0.0"
+ "at-least-node": "^1.0.0",
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
},
"engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
+ "node": ">=10"
}
},
- "node_modules/@electron/node-gyp/node_modules/fs-minipass": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
- "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==",
+ "node_modules/@electron/osx-sign": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz",
+ "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==",
"dev": true,
- "license": "ISC",
+ "license": "BSD-2-Clause",
"dependencies": {
- "minipass": "^3.0.0"
+ "compare-version": "^0.1.2",
+ "debug": "^4.3.4",
+ "fs-extra": "^10.0.0",
+ "isbinaryfile": "^4.0.8",
+ "minimist": "^1.2.6",
+ "plist": "^3.0.5"
+ },
+ "bin": {
+ "electron-osx-flat": "bin/electron-osx-flat.js",
+ "electron-osx-sign": "bin/electron-osx-sign.js"
},
"engines": {
- "node": ">= 8"
+ "node": ">=12.0.0"
}
},
- "node_modules/@electron/node-gyp/node_modules/glob": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
- "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
- "deprecated": "Glob versions prior to v9 are no longer supported",
+ "node_modules/@electron/osx-sign/node_modules/isbinaryfile": {
+ "version": "4.0.10",
+ "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz",
+ "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==",
"dev": true,
- "license": "ISC",
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^5.0.1",
- "once": "^1.3.0"
- },
+ "license": "MIT",
"engines": {
- "node": ">=12"
+ "node": ">= 8.0.0"
},
"funding": {
- "url": "https://github.com/sponsors/isaacs"
+ "url": "https://github.com/sponsors/gjtorikian/"
}
},
- "node_modules/@electron/node-gyp/node_modules/http-proxy-agent": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
- "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
+ "node_modules/@electron/rebuild": {
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-3.7.2.tgz",
+ "integrity": "sha512-19/KbIR/DAxbsCkiaGMXIdPnMCJLkcf8AvGnduJtWBs/CBwiAjY1apCqOLVxrXg+rtXFCngbXhBanWjxLUt1Mg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@tootallnate/once": "2",
- "agent-base": "6",
- "debug": "4"
+ "@electron/node-gyp": "git+https://github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2",
+ "@malept/cross-spawn-promise": "^2.0.0",
+ "chalk": "^4.0.0",
+ "debug": "^4.1.1",
+ "detect-libc": "^2.0.1",
+ "fs-extra": "^10.0.0",
+ "got": "^11.7.0",
+ "node-abi": "^3.45.0",
+ "node-api-version": "^0.2.0",
+ "ora": "^5.1.0",
+ "read-binary-file-arch": "^1.0.6",
+ "semver": "^7.3.5",
+ "tar": "^6.0.5",
+ "yargs": "^17.0.1"
+ },
+ "bin": {
+ "electron-rebuild": "lib/cli.js"
},
"engines": {
- "node": ">= 6"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/https-proxy-agent": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
- "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "agent-base": "6",
- "debug": "4"
- },
- "engines": {
- "node": ">= 6"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/lru-cache": {
- "version": "7.18.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
- "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/make-fetch-happen": {
- "version": "10.2.1",
- "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz",
- "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "agentkeepalive": "^4.2.1",
- "cacache": "^16.1.0",
- "http-cache-semantics": "^4.1.0",
- "http-proxy-agent": "^5.0.0",
- "https-proxy-agent": "^5.0.0",
- "is-lambda": "^1.0.1",
- "lru-cache": "^7.7.1",
- "minipass": "^3.1.6",
- "minipass-collect": "^1.0.2",
- "minipass-fetch": "^2.0.3",
- "minipass-flush": "^1.0.5",
- "minipass-pipeline": "^1.2.4",
- "negotiator": "^0.6.3",
- "promise-retry": "^2.0.1",
- "socks-proxy-agent": "^7.0.0",
- "ssri": "^9.0.0"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/minimatch": {
- "version": "5.1.6",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
- "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/minipass": {
- "version": "3.3.6",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
- "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/minipass-collect": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz",
- "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "minipass": "^3.0.0"
- },
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/minipass-fetch": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz",
- "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "minipass": "^3.1.6",
- "minipass-sized": "^1.0.3",
- "minizlib": "^2.1.2"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- },
- "optionalDependencies": {
- "encoding": "^0.1.13"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/minizlib": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz",
- "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "minipass": "^3.0.0",
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/negotiator": {
- "version": "0.6.4",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz",
- "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 0.6"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/nopt": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz",
- "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "abbrev": "^1.0.0"
- },
- "bin": {
- "nopt": "bin/nopt.js"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/p-map": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
- "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "aggregate-error": "^3.0.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/proc-log": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz",
- "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/rimraf": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
- "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
- "deprecated": "Rimraf versions prior to v4 are no longer supported",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "glob": "^7.1.3"
- },
- "bin": {
- "rimraf": "bin.js"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/brace-expansion": {
- "version": "1.1.12",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
- "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "balanced-match": "^1.0.0",
- "concat-map": "0.0.1"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/glob": {
- "version": "7.2.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
- "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
- "deprecated": "Glob versions prior to v9 are no longer supported",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.1.1",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/rimraf/node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "brace-expansion": "^1.1.7"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/socks-proxy-agent": {
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz",
- "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "agent-base": "^6.0.2",
- "debug": "^4.3.3",
- "socks": "^2.6.2"
- },
- "engines": {
- "node": ">= 10"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/ssri": {
- "version": "9.0.1",
- "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz",
- "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "minipass": "^3.1.1"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/unique-filename": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz",
- "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "unique-slug": "^3.0.0"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/unique-slug": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz",
- "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "imurmurhash": "^0.1.4"
- },
- "engines": {
- "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
- }
- },
- "node_modules/@electron/node-gyp/node_modules/yallist": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
- "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/@electron/notarize": {
- "version": "2.5.0",
- "resolved": "https://registry.npmjs.org/@electron/notarize/-/notarize-2.5.0.tgz",
- "integrity": "sha512-jNT8nwH1f9X5GEITXaQ8IF/KdskvIkOFfB2CvwumsveVidzpSc+mvhhTMdAGSYF3O+Nq49lJ7y+ssODRXu06+A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "debug": "^4.1.1",
- "fs-extra": "^9.0.1",
- "promise-retry": "^2.0.1"
- },
- "engines": {
- "node": ">= 10.0.0"
- }
- },
- "node_modules/@electron/notarize/node_modules/fs-extra": {
- "version": "9.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
- "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "at-least-node": "^1.0.0",
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/@electron/notarize/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/@electron/notarize/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
- "node_modules/@electron/osx-sign": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/@electron/osx-sign/-/osx-sign-1.3.1.tgz",
- "integrity": "sha512-BAfviURMHpmb1Yb50YbCxnOY0wfwaLXH5KJ4+80zS0gUkzDX3ec23naTlEqKsN+PwYn+a1cCzM7BJ4Wcd3sGzw==",
- "dev": true,
- "license": "BSD-2-Clause",
- "dependencies": {
- "compare-version": "^0.1.2",
- "debug": "^4.3.4",
- "fs-extra": "^10.0.0",
- "isbinaryfile": "^4.0.8",
- "minimist": "^1.2.6",
- "plist": "^3.0.5"
- },
- "bin": {
- "electron-osx-flat": "bin/electron-osx-flat.js",
- "electron-osx-sign": "bin/electron-osx-sign.js"
- },
- "engines": {
- "node": ">=12.0.0"
- }
- },
- "node_modules/@electron/osx-sign/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@electron/osx-sign/node_modules/isbinaryfile": {
- "version": "4.0.10",
- "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-4.0.10.tgz",
- "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 8.0.0"
- },
- "funding": {
- "url": "https://github.com/sponsors/gjtorikian/"
- }
- },
- "node_modules/@electron/osx-sign/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/@electron/osx-sign/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
- "node_modules/@electron/rebuild": {
- "version": "4.0.2",
- "resolved": "https://registry.npmjs.org/@electron/rebuild/-/rebuild-4.0.2.tgz",
- "integrity": "sha512-8iZWVPvOpCdIc5Pj5udQV3PeO7liJVC7BBUSizl1HCfP7ZxYc9Kqz0c3PDNj2HQ5cQfJ5JaBeJIYKPjAvLn2Rg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@malept/cross-spawn-promise": "^2.0.0",
- "debug": "^4.1.1",
- "detect-libc": "^2.0.1",
- "got": "^11.7.0",
- "graceful-fs": "^4.2.11",
- "node-abi": "^4.2.0",
- "node-api-version": "^0.2.1",
- "node-gyp": "^11.2.0",
- "ora": "^5.1.0",
- "read-binary-file-arch": "^1.0.6",
- "semver": "^7.3.5",
- "tar": "^6.0.5",
- "yargs": "^17.0.1"
- },
- "bin": {
- "electron-rebuild": "lib/cli.js"
- },
- "engines": {
- "node": ">=22.12.0"
+ "node": ">=12.13.0"
}
},
"node_modules/@electron/universal": {
@@ -1472,9 +969,9 @@
}
},
"node_modules/@electron/universal/node_modules/fs-extra": {
- "version": "11.3.3",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz",
- "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==",
+ "version": "11.3.2",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz",
+ "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1486,19 +983,6 @@
"node": ">=14.14"
}
},
- "node_modules/@electron/universal/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
"node_modules/@electron/universal/node_modules/minimatch": {
"version": "9.0.5",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
@@ -1515,16 +999,6 @@
"url": "https://github.com/sponsors/isaacs"
}
},
- "node_modules/@electron/universal/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
"node_modules/@electron/windows-sign": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@electron/windows-sign/-/windows-sign-1.2.2.tgz",
@@ -1548,56 +1022,22 @@
}
},
"node_modules/@electron/windows-sign/node_modules/fs-extra": {
- "version": "11.3.3",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz",
- "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==",
- "dev": true,
- "license": "MIT",
- "optional": true,
- "peer": true,
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=14.14"
- }
- },
- "node_modules/@electron/windows-sign/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "optional": true,
- "peer": true,
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/@electron/windows-sign/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
+ "version": "11.3.2",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz",
+ "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
"engines": {
- "node": ">= 10.0.0"
+ "node": ">=14.14"
}
},
- "node_modules/@epic-web/invariant": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/@epic-web/invariant/-/invariant-1.0.0.tgz",
- "integrity": "sha512-lrTPqgvfFQtR/eY/qkIzp98OGdNJu0m5ji3q/nJI8v3SXkRKEnWiOxMmbvcSoAIzv/cGiuvRy57k4suKQSAdwA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/@esbuild/aix-ppc64": {
"version": "0.25.12",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz",
@@ -2041,9 +1481,9 @@
}
},
"node_modules/@eslint-community/eslint-utils": {
- "version": "4.9.1",
- "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz",
- "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==",
+ "version": "4.9.0",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
+ "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -2223,24 +1663,6 @@
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
}
},
- "node_modules/@exodus/bytes": {
- "version": "1.7.0",
- "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.7.0.tgz",
- "integrity": "sha512-5i+BtvujK/vM07YCGDyz4C4AyDzLmhxHMtM5HpUyPRtJPBdFPsj290ffXW+UXY21/G7GtXeHD2nRmq0T1ShyQQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": "^20.19.0 || ^22.12.0 || >=24.0.0"
- },
- "peerDependencies": {
- "@exodus/crypto": "^1.0.0-rc.4"
- },
- "peerDependenciesMeta": {
- "@exodus/crypto": {
- "optional": true
- }
- }
- },
"node_modules/@floating-ui/core": {
"version": "1.7.3",
"resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.3.tgz",
@@ -2379,6 +1801,19 @@
"node": ">=12"
}
},
+ "node_modules/@isaacs/cliui/node_modules/ansi-regex": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
+ "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
"node_modules/@isaacs/cliui/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
@@ -2417,6 +1852,22 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/@isaacs/cliui/node_modules/strip-ansi": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
+ "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
"node_modules/@isaacs/cliui/node_modules/wrap-ansi": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
@@ -2435,19 +1886,6 @@
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
- "node_modules/@isaacs/fs-minipass": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz",
- "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "minipass": "^7.0.4"
- },
- "engines": {
- "node": ">=18.0.0"
- }
- },
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.13",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
@@ -2645,64 +2083,18 @@
"node": ">=10"
}
},
- "node_modules/@malept/flatpak-bundler/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/@malept/flatpak-bundler/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
- "node_modules/@npmcli/agent": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-3.0.0.tgz",
- "integrity": "sha512-S79NdEgDQd/NGCay6TCoVzXSj74skRZIKJcpJjC5lOq34SZzyI6MqtiiWoiVWoVrTcGjNeC4ipbh1VIHlpfF5Q==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "agent-base": "^7.1.0",
- "http-proxy-agent": "^7.0.0",
- "https-proxy-agent": "^7.0.1",
- "lru-cache": "^10.0.1",
- "socks-proxy-agent": "^8.0.3"
- },
- "engines": {
- "node": "^18.17.0 || >=20.5.0"
- }
- },
- "node_modules/@npmcli/agent/node_modules/lru-cache": {
- "version": "10.4.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
- "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/@npmcli/fs": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-4.0.0.tgz",
- "integrity": "sha512-/xGlezI6xfGO9NwuJlnwz/K14qD1kCSAGtacBHnGzeAIuJGazcp45KP5NuyARXoKb7cwulAGWVsbeSxdG/cb0Q==",
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.2.tgz",
+ "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==",
"dev": true,
"license": "ISC",
"dependencies": {
+ "@gar/promisify": "^1.1.3",
"semver": "^7.3.5"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/@npmcli/move-file": {
@@ -2720,23 +2112,6 @@
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
- "node_modules/@npmcli/move-file/node_modules/rimraf": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
- "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
- "deprecated": "Rimraf versions prior to v4 are no longer supported",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "glob": "^7.1.3"
- },
- "bin": {
- "rimraf": "bin.js"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
@@ -3995,9 +3370,9 @@
"license": "MIT"
},
"node_modules/@rollup/rollup-android-arm-eabi": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.54.0.tgz",
- "integrity": "sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.4.tgz",
+ "integrity": "sha512-PWU3Y92H4DD0bOqorEPp1Y0tbzwAurFmIYpjcObv5axGVOtcTlB0b2UKMd2echo08MgN7jO8WQZSSysvfisFSQ==",
"cpu": [
"arm"
],
@@ -4009,9 +3384,9 @@
]
},
"node_modules/@rollup/rollup-android-arm64": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.54.0.tgz",
- "integrity": "sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.4.tgz",
+ "integrity": "sha512-Gw0/DuVm3rGsqhMGYkSOXXIx20cC3kTlivZeuaGt4gEgILivykNyBWxeUV5Cf2tDA2nPLah26vq3emlRrWVbng==",
"cpu": [
"arm64"
],
@@ -4023,9 +3398,9 @@
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.54.0.tgz",
- "integrity": "sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.4.tgz",
+ "integrity": "sha512-+w06QvXsgzKwdVg5qRLZpTHh1bigHZIqoIUPtiqh05ZiJVUQ6ymOxaPkXTvRPRLH88575ZCRSRM3PwIoNma01Q==",
"cpu": [
"arm64"
],
@@ -4037,9 +3412,9 @@
]
},
"node_modules/@rollup/rollup-darwin-x64": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.54.0.tgz",
- "integrity": "sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.4.tgz",
+ "integrity": "sha512-EB4Na9G2GsrRNRNFPuxfwvDRDUwQEzJPpiK1vo2zMVhEeufZ1k7J1bKnT0JYDfnPC7RNZ2H5YNQhW6/p2QKATw==",
"cpu": [
"x64"
],
@@ -4051,9 +3426,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-arm64": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.54.0.tgz",
- "integrity": "sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.4.tgz",
+ "integrity": "sha512-bldA8XEqPcs6OYdknoTMaGhjytnwQ0NClSPpWpmufOuGPN5dDmvIa32FygC2gneKK4A1oSx86V1l55hyUWUYFQ==",
"cpu": [
"arm64"
],
@@ -4065,9 +3440,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-x64": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.54.0.tgz",
- "integrity": "sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.4.tgz",
+ "integrity": "sha512-3T8GPjH6mixCd0YPn0bXtcuSXi1Lj+15Ujw2CEb7dd24j9thcKscCf88IV7n76WaAdorOzAgSSbuVRg4C8V8Qw==",
"cpu": [
"x64"
],
@@ -4079,9 +3454,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.54.0.tgz",
- "integrity": "sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.4.tgz",
+ "integrity": "sha512-UPMMNeC4LXW7ZSHxeP3Edv09aLsFUMaD1TSVW6n1CWMECnUIJMFFB7+XC2lZTdPtvB36tYC0cJWc86mzSsaviw==",
"cpu": [
"arm"
],
@@ -4093,9 +3468,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.54.0.tgz",
- "integrity": "sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.4.tgz",
+ "integrity": "sha512-H8uwlV0otHs5Q7WAMSoyvjV9DJPiy5nJ/xnHolY0QptLPjaSsuX7tw+SPIfiYH6cnVx3fe4EWFafo6gH6ekZKA==",
"cpu": [
"arm"
],
@@ -4107,9 +3482,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.54.0.tgz",
- "integrity": "sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.4.tgz",
+ "integrity": "sha512-BLRwSRwICXz0TXkbIbqJ1ibK+/dSBpTJqDClF61GWIrxTXZWQE78ROeIhgl5MjVs4B4gSLPCFeD4xML9vbzvCQ==",
"cpu": [
"arm64"
],
@@ -4121,9 +3496,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.54.0.tgz",
- "integrity": "sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.4.tgz",
+ "integrity": "sha512-6bySEjOTbmVcPJAywjpGLckK793A0TJWSbIa0sVwtVGfe/Nz6gOWHOwkshUIAp9j7wg2WKcA4Snu7Y1nUZyQew==",
"cpu": [
"arm64"
],
@@ -4135,9 +3510,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.54.0.tgz",
- "integrity": "sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.4.tgz",
+ "integrity": "sha512-U0ow3bXYJZ5MIbchVusxEycBw7bO6C2u5UvD31i5IMTrnt2p4Fh4ZbHSdc/31TScIJQYHwxbj05BpevB3201ug==",
"cpu": [
"loong64"
],
@@ -4149,9 +3524,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.54.0.tgz",
- "integrity": "sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.4.tgz",
+ "integrity": "sha512-iujDk07ZNwGLVn0YIWM80SFN039bHZHCdCCuX9nyx3Jsa2d9V/0Y32F+YadzwbvDxhSeVo9zefkoPnXEImnM5w==",
"cpu": [
"ppc64"
],
@@ -4163,9 +3538,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.54.0.tgz",
- "integrity": "sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.4.tgz",
+ "integrity": "sha512-MUtAktiOUSu+AXBpx1fkuG/Bi5rhlorGs3lw5QeJ2X3ziEGAq7vFNdWVde6XGaVqi0LGSvugwjoxSNJfHFTC0g==",
"cpu": [
"riscv64"
],
@@ -4177,9 +3552,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-musl": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.54.0.tgz",
- "integrity": "sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.4.tgz",
+ "integrity": "sha512-btm35eAbDfPtcFEgaXCI5l3c2WXyzwiE8pArhd66SDtoLWmgK5/M7CUxmUglkwtniPzwvWioBKKl6IXLbPf2sQ==",
"cpu": [
"riscv64"
],
@@ -4191,9 +3566,9 @@
]
},
"node_modules/@rollup/rollup-linux-s390x-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.54.0.tgz",
- "integrity": "sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.4.tgz",
+ "integrity": "sha512-uJlhKE9ccUTCUlK+HUz/80cVtx2RayadC5ldDrrDUFaJK0SNb8/cCmC9RhBhIWuZ71Nqj4Uoa9+xljKWRogdhA==",
"cpu": [
"s390x"
],
@@ -4205,9 +3580,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.54.0.tgz",
- "integrity": "sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.4.tgz",
+ "integrity": "sha512-jjEMkzvASQBbzzlzf4os7nzSBd/cvPrpqXCUOqoeCh1dQ4BP3RZCJk8XBeik4MUln3m+8LeTJcY54C/u8wb3DQ==",
"cpu": [
"x64"
],
@@ -4219,9 +3594,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.54.0.tgz",
- "integrity": "sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.4.tgz",
+ "integrity": "sha512-lu90KG06NNH19shC5rBPkrh6mrTpq5kviFylPBXQVpdEu0yzb0mDgyxLr6XdcGdBIQTH/UAhDJnL+APZTBu1aQ==",
"cpu": [
"x64"
],
@@ -4233,9 +3608,9 @@
]
},
"node_modules/@rollup/rollup-openharmony-arm64": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.54.0.tgz",
- "integrity": "sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.4.tgz",
+ "integrity": "sha512-dFDcmLwsUzhAm/dn0+dMOQZoONVYBtgik0VuY/d5IJUUb787L3Ko/ibvTvddqhb3RaB7vFEozYevHN4ox22R/w==",
"cpu": [
"arm64"
],
@@ -4247,9 +3622,9 @@
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.54.0.tgz",
- "integrity": "sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.4.tgz",
+ "integrity": "sha512-WvUpUAWmUxZKtRnQWpRKnLW2DEO8HB/l8z6oFFMNuHndMzFTJEXzaYJ5ZAmzNw0L21QQJZsUQFt2oPf3ykAD/w==",
"cpu": [
"arm64"
],
@@ -4261,9 +3636,9 @@
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.54.0.tgz",
- "integrity": "sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.4.tgz",
+ "integrity": "sha512-JGbeF2/FDU0x2OLySw/jgvkwWUo05BSiJK0dtuI4LyuXbz3wKiC1xHhLB1Tqm5VU6ZZDmAorj45r/IgWNWku5g==",
"cpu": [
"ia32"
],
@@ -4275,9 +3650,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-gnu": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.54.0.tgz",
- "integrity": "sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.4.tgz",
+ "integrity": "sha512-zuuC7AyxLWLubP+mlUwEyR8M1ixW1ERNPHJfXm8x7eQNP4Pzkd7hS3qBuKBR70VRiQ04Kw8FNfRMF5TNxuZq2g==",
"cpu": [
"x64"
],
@@ -4289,9 +3664,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-msvc": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.54.0.tgz",
- "integrity": "sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.4.tgz",
+ "integrity": "sha512-Sbx45u/Lbb5RyptSbX7/3deP+/lzEmZ0BTSHxwxN/IMOZDZf8S0AGo0hJD5n/LQssxb5Z3B4og4P2X6Dd8acCA==",
"cpu": [
"x64"
],
@@ -4316,9 +3691,9 @@
}
},
"node_modules/@standard-schema/spec": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz",
- "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==",
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz",
+ "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==",
"dev": true,
"license": "MIT"
},
@@ -4558,66 +3933,6 @@
"node": ">=14.0.0"
}
},
- "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
- "version": "1.7.1",
- "dev": true,
- "inBundle": true,
- "license": "MIT",
- "optional": true,
- "dependencies": {
- "@emnapi/wasi-threads": "1.1.0",
- "tslib": "^2.4.0"
- }
- },
- "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
- "version": "1.7.1",
- "dev": true,
- "inBundle": true,
- "license": "MIT",
- "optional": true,
- "dependencies": {
- "tslib": "^2.4.0"
- }
- },
- "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
- "version": "1.1.0",
- "dev": true,
- "inBundle": true,
- "license": "MIT",
- "optional": true,
- "dependencies": {
- "tslib": "^2.4.0"
- }
- },
- "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
- "version": "1.1.0",
- "dev": true,
- "inBundle": true,
- "license": "MIT",
- "optional": true,
- "dependencies": {
- "@emnapi/core": "^1.7.1",
- "@emnapi/runtime": "^1.7.1",
- "@tybys/wasm-util": "^0.10.1"
- }
- },
- "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
- "version": "0.10.1",
- "dev": true,
- "inBundle": true,
- "license": "MIT",
- "optional": true,
- "dependencies": {
- "tslib": "^2.4.0"
- }
- },
- "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
- "version": "2.8.1",
- "dev": true,
- "inBundle": true,
- "license": "0BSD",
- "optional": true
- },
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
"version": "4.1.18",
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz",
@@ -4679,12 +3994,12 @@
}
},
"node_modules/@tanstack/react-virtual": {
- "version": "3.13.14",
- "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.14.tgz",
- "integrity": "sha512-WG0d7mBD54eA7dgA3+sO5csS0B49QKqM6Gy5Rf31+Oq/LTKROQSao9m2N/vz1IqVragOKU5t5k1LAcqh/DfTxw==",
+ "version": "3.13.13",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.13.13.tgz",
+ "integrity": "sha512-4o6oPMDvQv+9gMi8rE6gWmsOjtUZUYIJHv7EB+GblyYdi8U6OqLl8rhHWIUZSL1dUU2dPwTdTgybCKf9EjIrQg==",
"license": "MIT",
"dependencies": {
- "@tanstack/virtual-core": "3.13.14"
+ "@tanstack/virtual-core": "3.13.13"
},
"funding": {
"type": "github",
@@ -4696,9 +4011,9 @@
}
},
"node_modules/@tanstack/virtual-core": {
- "version": "3.13.14",
- "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.14.tgz",
- "integrity": "sha512-b5Uvd8J2dc7ICeX9SRb/wkCxWk7pUwN214eEPAQsqrsktSKTCmyLxOQWSMgogBByXclZeAdgZ3k4o0fIYUIBqQ==",
+ "version": "3.13.13",
+ "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.13.13.tgz",
+ "integrity": "sha512-uQFoSdKKf5S8k51W5t7b2qpfkyIbdHMzAn+AMQvHPxKUPeo1SsGaA4JRISQT87jm28b7z8OEqPcg1IOZagQHcA==",
"license": "MIT",
"funding": {
"type": "github",
@@ -4726,6 +4041,33 @@
"node": ">=18"
}
},
+ "node_modules/@testing-library/jest-dom": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz",
+ "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@adobe/css-tools": "^4.4.0",
+ "aria-query": "^5.0.0",
+ "css.escape": "^1.5.1",
+ "dom-accessibility-api": "^0.6.3",
+ "picocolors": "^1.1.1",
+ "redent": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=14",
+ "npm": ">=6",
+ "yarn": ">=1"
+ }
+ },
+ "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz",
+ "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@testing-library/react": {
"version": "16.3.1",
"resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.1.tgz",
@@ -4931,9 +4273,9 @@
"license": "MIT"
},
"node_modules/@types/node": {
- "version": "25.0.3",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.3.tgz",
- "integrity": "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==",
+ "version": "25.0.2",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.2.tgz",
+ "integrity": "sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -5021,20 +4363,20 @@
}
},
"node_modules/@typescript-eslint/eslint-plugin": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.51.0.tgz",
- "integrity": "sha512-XtssGWJvypyM2ytBnSnKtHYOGT+4ZwTnBVl36TA4nRO2f4PRNGz5/1OszHzcZCvcBMh+qb7I06uoCmLTRdR9og==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.49.0.tgz",
+ "integrity": "sha512-JXij0vzIaTtCwu6SxTh8qBc66kmf1xs7pI4UOiMDFVct6q86G0Zs7KRcEoJgY3Cav3x5Tq0MF5jwgpgLqgKG3A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@eslint-community/regexpp": "^4.10.0",
- "@typescript-eslint/scope-manager": "8.51.0",
- "@typescript-eslint/type-utils": "8.51.0",
- "@typescript-eslint/utils": "8.51.0",
- "@typescript-eslint/visitor-keys": "8.51.0",
+ "@typescript-eslint/scope-manager": "8.49.0",
+ "@typescript-eslint/type-utils": "8.49.0",
+ "@typescript-eslint/utils": "8.49.0",
+ "@typescript-eslint/visitor-keys": "8.49.0",
"ignore": "^7.0.0",
"natural-compare": "^1.4.0",
- "ts-api-utils": "^2.2.0"
+ "ts-api-utils": "^2.1.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -5044,7 +4386,7 @@
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependencies": {
- "@typescript-eslint/parser": "^8.51.0",
+ "@typescript-eslint/parser": "^8.49.0",
"eslint": "^8.57.0 || ^9.0.0",
"typescript": ">=4.8.4 <6.0.0"
}
@@ -5060,16 +4402,16 @@
}
},
"node_modules/@typescript-eslint/parser": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.51.0.tgz",
- "integrity": "sha512-3xP4XzzDNQOIqBMWogftkwxhg5oMKApqY0BAflmLZiFYHqyhSOxv/cd/zPQLTcCXr4AkaKb25joocY0BD1WC6A==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.49.0.tgz",
+ "integrity": "sha512-N9lBGA9o9aqb1hVMc9hzySbhKibHmB+N3IpoShyV6HyQYRGIhlrO5rQgttypi+yEeKsKI4idxC8Jw6gXKD4THA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/scope-manager": "8.51.0",
- "@typescript-eslint/types": "8.51.0",
- "@typescript-eslint/typescript-estree": "8.51.0",
- "@typescript-eslint/visitor-keys": "8.51.0",
+ "@typescript-eslint/scope-manager": "8.49.0",
+ "@typescript-eslint/types": "8.49.0",
+ "@typescript-eslint/typescript-estree": "8.49.0",
+ "@typescript-eslint/visitor-keys": "8.49.0",
"debug": "^4.3.4"
},
"engines": {
@@ -5085,14 +4427,14 @@
}
},
"node_modules/@typescript-eslint/project-service": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.51.0.tgz",
- "integrity": "sha512-Luv/GafO07Z7HpiI7qeEW5NW8HUtZI/fo/kE0YbtQEFpJRUuR0ajcWfCE5bnMvL7QQFrmT/odMe8QZww8X2nfQ==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.49.0.tgz",
+ "integrity": "sha512-/wJN0/DKkmRUMXjZUXYZpD1NEQzQAAn9QWfGwo+Ai8gnzqH7tvqS7oNVdTjKqOcPyVIdZdyCMoqN66Ia789e7g==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/tsconfig-utils": "^8.51.0",
- "@typescript-eslint/types": "^8.51.0",
+ "@typescript-eslint/tsconfig-utils": "^8.49.0",
+ "@typescript-eslint/types": "^8.49.0",
"debug": "^4.3.4"
},
"engines": {
@@ -5107,14 +4449,14 @@
}
},
"node_modules/@typescript-eslint/scope-manager": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.51.0.tgz",
- "integrity": "sha512-JhhJDVwsSx4hiOEQPeajGhCWgBMBwVkxC/Pet53EpBVs7zHHtayKefw1jtPaNRXpI9RA2uocdmpdfE7T+NrizA==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.49.0.tgz",
+ "integrity": "sha512-npgS3zi+/30KSOkXNs0LQXtsg9ekZ8OISAOLGWA/ZOEn0ZH74Ginfl7foziV8DT+D98WfQ5Kopwqb/PZOaIJGg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.51.0",
- "@typescript-eslint/visitor-keys": "8.51.0"
+ "@typescript-eslint/types": "8.49.0",
+ "@typescript-eslint/visitor-keys": "8.49.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -5125,9 +4467,9 @@
}
},
"node_modules/@typescript-eslint/tsconfig-utils": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.51.0.tgz",
- "integrity": "sha512-Qi5bSy/vuHeWyir2C8u/uqGMIlIDu8fuiYWv48ZGlZ/k+PRPHtaAu7erpc7p5bzw2WNNSniuxoMSO4Ar6V9OXw==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.49.0.tgz",
+ "integrity": "sha512-8prixNi1/6nawsRYxet4YOhnbW+W9FK/bQPxsGB1D3ZrDzbJ5FXw5XmzxZv82X3B+ZccuSxo/X8q9nQ+mFecWA==",
"dev": true,
"license": "MIT",
"engines": {
@@ -5142,17 +4484,17 @@
}
},
"node_modules/@typescript-eslint/type-utils": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.51.0.tgz",
- "integrity": "sha512-0XVtYzxnobc9K0VU7wRWg1yiUrw4oQzexCG2V2IDxxCxhqBMSMbjB+6o91A+Uc0GWtgjCa3Y8bi7hwI0Tu4n5Q==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.49.0.tgz",
+ "integrity": "sha512-KTExJfQ+svY8I10P4HdxKzWsvtVnsuCifU5MvXrRwoP2KOlNZ9ADNEWWsQTJgMxLzS5VLQKDjkCT/YzgsnqmZg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.51.0",
- "@typescript-eslint/typescript-estree": "8.51.0",
- "@typescript-eslint/utils": "8.51.0",
+ "@typescript-eslint/types": "8.49.0",
+ "@typescript-eslint/typescript-estree": "8.49.0",
+ "@typescript-eslint/utils": "8.49.0",
"debug": "^4.3.4",
- "ts-api-utils": "^2.2.0"
+ "ts-api-utils": "^2.1.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -5167,9 +4509,9 @@
}
},
"node_modules/@typescript-eslint/types": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.51.0.tgz",
- "integrity": "sha512-TizAvWYFM6sSscmEakjY3sPqGwxZRSywSsPEiuZF6d5GmGD9Gvlsv0f6N8FvAAA0CD06l3rIcWNbsN1e5F/9Ag==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.49.0.tgz",
+ "integrity": "sha512-e9k/fneezorUo6WShlQpMxXh8/8wfyc+biu6tnAqA81oWrEic0k21RHzP9uqqpyBBeBKu4T+Bsjy9/b8u7obXQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -5181,21 +4523,21 @@
}
},
"node_modules/@typescript-eslint/typescript-estree": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.51.0.tgz",
- "integrity": "sha512-1qNjGqFRmlq0VW5iVlcyHBbCjPB7y6SxpBkrbhNWMy/65ZoncXCEPJxkRZL8McrseNH6lFhaxCIaX+vBuFnRng==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.49.0.tgz",
+ "integrity": "sha512-jrLdRuAbPfPIdYNppHJ/D0wN+wwNfJ32YTAm10eJVsFmrVpXQnDWBn8niCSMlWjvml8jsce5E/O+86IQtTbJWA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/project-service": "8.51.0",
- "@typescript-eslint/tsconfig-utils": "8.51.0",
- "@typescript-eslint/types": "8.51.0",
- "@typescript-eslint/visitor-keys": "8.51.0",
+ "@typescript-eslint/project-service": "8.49.0",
+ "@typescript-eslint/tsconfig-utils": "8.49.0",
+ "@typescript-eslint/types": "8.49.0",
+ "@typescript-eslint/visitor-keys": "8.49.0",
"debug": "^4.3.4",
"minimatch": "^9.0.4",
"semver": "^7.6.0",
"tinyglobby": "^0.2.15",
- "ts-api-utils": "^2.2.0"
+ "ts-api-utils": "^2.1.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -5235,16 +4577,16 @@
}
},
"node_modules/@typescript-eslint/utils": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.51.0.tgz",
- "integrity": "sha512-11rZYxSe0zabiKaCP2QAwRf/dnmgFgvTmeDTtZvUvXG3UuAdg/GU02NExmmIXzz3vLGgMdtrIosI84jITQOxUA==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.49.0.tgz",
+ "integrity": "sha512-N3W7rJw7Rw+z1tRsHZbK395TWSYvufBXumYtEGzypgMUthlg0/hmCImeA8hgO2d2G4pd7ftpxxul2J8OdtdaFA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.7.0",
- "@typescript-eslint/scope-manager": "8.51.0",
- "@typescript-eslint/types": "8.51.0",
- "@typescript-eslint/typescript-estree": "8.51.0"
+ "@typescript-eslint/scope-manager": "8.49.0",
+ "@typescript-eslint/types": "8.49.0",
+ "@typescript-eslint/typescript-estree": "8.49.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -5259,13 +4601,13 @@
}
},
"node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.51.0.tgz",
- "integrity": "sha512-mM/JRQOzhVN1ykejrvwnBRV3+7yTKK8tVANVN3o1O0t0v7o+jqdVu9crPy5Y9dov15TJk/FTIgoUGHrTOVL3Zg==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.49.0.tgz",
+ "integrity": "sha512-LlKaciDe3GmZFphXIc79THF/YYBugZ7FS1pO581E/edlVVNbZKDy93evqmrfQ9/Y4uN0vVhX4iuchq26mK/iiA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.51.0",
+ "@typescript-eslint/types": "8.49.0",
"eslint-visitor-keys": "^4.2.1"
},
"engines": {
@@ -5304,16 +4646,16 @@
}
},
"node_modules/@vitest/expect": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz",
- "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.15.tgz",
+ "integrity": "sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@standard-schema/spec": "^1.0.0",
"@types/chai": "^5.2.2",
- "@vitest/spy": "4.0.16",
- "@vitest/utils": "4.0.16",
+ "@vitest/spy": "4.0.15",
+ "@vitest/utils": "4.0.15",
"chai": "^6.2.1",
"tinyrainbow": "^3.0.3"
},
@@ -5322,13 +4664,13 @@
}
},
"node_modules/@vitest/mocker": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz",
- "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.15.tgz",
+ "integrity": "sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/spy": "4.0.16",
+ "@vitest/spy": "4.0.15",
"estree-walker": "^3.0.3",
"magic-string": "^0.30.21"
},
@@ -5349,9 +4691,9 @@
}
},
"node_modules/@vitest/pretty-format": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz",
- "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.15.tgz",
+ "integrity": "sha512-SWdqR8vEv83WtZcrfLNqlqeQXlQLh2iilO1Wk1gv4eiHKjEzvgHb2OVc3mIPyhZE6F+CtfYjNlDJwP5MN6Km7A==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -5362,13 +4704,13 @@
}
},
"node_modules/@vitest/runner": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz",
- "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.15.tgz",
+ "integrity": "sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/utils": "4.0.16",
+ "@vitest/utils": "4.0.15",
"pathe": "^2.0.3"
},
"funding": {
@@ -5376,13 +4718,13 @@
}
},
"node_modules/@vitest/snapshot": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz",
- "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.15.tgz",
+ "integrity": "sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/pretty-format": "4.0.16",
+ "@vitest/pretty-format": "4.0.15",
"magic-string": "^0.30.21",
"pathe": "^2.0.3"
},
@@ -5391,9 +4733,9 @@
}
},
"node_modules/@vitest/spy": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz",
- "integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.15.tgz",
+ "integrity": "sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==",
"dev": true,
"license": "MIT",
"funding": {
@@ -5401,13 +4743,13 @@
}
},
"node_modules/@vitest/utils": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz",
- "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.15.tgz",
+ "integrity": "sha512-HXjPW2w5dxhTD0dLwtYHDnelK3j8sR8cWIaLxr22evTyY6q8pRCjZSmhRWVjBaOVXChQd6AwMzi9pucorXCPZA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/pretty-format": "4.0.16",
+ "@vitest/pretty-format": "4.0.15",
"tinyrainbow": "^3.0.3"
},
"funding": {
@@ -5425,37 +4767,47 @@
}
},
"node_modules/@xterm/addon-fit": {
- "version": "0.11.0",
- "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.11.0.tgz",
- "integrity": "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==",
- "license": "MIT"
+ "version": "0.10.0",
+ "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.10.0.tgz",
+ "integrity": "sha512-UFYkDm4HUahf2lnEyHvio51TNGiLK66mqP2JoATy7hRZeXaGMRDr00JiSF7m63vR5WKATF605yEggJKsw0JpMQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "@xterm/xterm": "^5.0.0"
+ }
},
"node_modules/@xterm/addon-serialize": {
- "version": "0.14.0",
- "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.14.0.tgz",
- "integrity": "sha512-uteyTU1EkrQa2Ux6P/uFl2fzmXI46jy5uoQMKEOM0fKTyiW7cSn0WrFenHm5vO5uEXX/GpwW/FgILvv3r0WbkA==",
- "license": "MIT"
+ "version": "0.13.0",
+ "resolved": "https://registry.npmjs.org/@xterm/addon-serialize/-/addon-serialize-0.13.0.tgz",
+ "integrity": "sha512-kGs8o6LWAmN1l2NpMp01/YkpxbmO4UrfWybeGu79Khw5K9+Krp7XhXbBTOTc3GJRRhd6EmILjpR8k5+odY39YQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "@xterm/xterm": "^5.0.0"
+ }
},
"node_modules/@xterm/addon-web-links": {
- "version": "0.12.0",
- "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.12.0.tgz",
- "integrity": "sha512-4Smom3RPyVp7ZMYOYDoC/9eGJJJqYhnPLGGqJ6wOBfB8VxPViJNSKdgRYb8NpaM6YSelEKbA2SStD7lGyqaobw==",
- "license": "MIT"
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/@xterm/addon-web-links/-/addon-web-links-0.11.0.tgz",
+ "integrity": "sha512-nIHQ38pQI+a5kXnRaTgwqSHnX7KE6+4SVoceompgHL26unAxdfP6IPqUTSYPQgSwM56hsElfoNrrW5V7BUED/Q==",
+ "license": "MIT",
+ "peerDependencies": {
+ "@xterm/xterm": "^5.0.0"
+ }
},
"node_modules/@xterm/addon-webgl": {
- "version": "0.19.0",
- "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.19.0.tgz",
- "integrity": "sha512-b3fMOsyLVuCeNJWxolACEUED0vm7qC0cy4wRvf3oURSzDTYVQiGPhTnhWZwIHdvC48Y+oLhvYXnY4XDXPoJo6A==",
- "license": "MIT"
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/@xterm/addon-webgl/-/addon-webgl-0.18.0.tgz",
+ "integrity": "sha512-xCnfMBTI+/HKPdRnSOHaJDRqEpq2Ugy8LEj9GiY4J3zJObo3joylIFaMvzBwbYRg8zLtkO0KQaStCeSfoaI2/w==",
+ "license": "MIT",
+ "peerDependencies": {
+ "@xterm/xterm": "^5.0.0"
+ }
},
"node_modules/@xterm/xterm": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz",
- "integrity": "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==",
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-5.5.0.tgz",
+ "integrity": "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A==",
"license": "MIT",
- "workspaces": [
- "addons/*"
- ]
+ "peer": true
},
"node_modules/7zip-bin": {
"version": "5.2.0",
@@ -5465,14 +4817,11 @@
"license": "MIT"
},
"node_modules/abbrev": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz",
- "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
"dev": true,
- "license": "ISC",
- "engines": {
- "node": "^18.17.0 || >=20.5.0"
- }
+ "license": "ISC"
},
"node_modules/acorn": {
"version": "8.15.0",
@@ -5680,63 +5029,12 @@
"semver": "^7.3.5",
"tar": "^6.0.5",
"yargs": "^17.0.1"
- },
- "bin": {
- "electron-rebuild": "lib/cli.js"
- },
- "engines": {
- "node": ">=12.13.0"
- }
- },
- "node_modules/app-builder-lib/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/app-builder-lib/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/app-builder-lib/node_modules/node-abi": {
- "version": "3.85.0",
- "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz",
- "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "semver": "^7.3.5"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/app-builder-lib/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
+ },
+ "bin": {
+ "electron-rebuild": "lib/cli.js"
+ },
"engines": {
- "node": ">= 10.0.0"
+ "node": ">=12.13.0"
}
},
"node_modules/argparse": {
@@ -6074,25 +5372,15 @@
"license": "MIT"
},
"node_modules/baseline-browser-mapping": {
- "version": "2.9.11",
- "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz",
- "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==",
+ "version": "2.9.7",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.7.tgz",
+ "integrity": "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg==",
"dev": true,
"license": "Apache-2.0",
"bin": {
"baseline-browser-mapping": "dist/cli.js"
}
},
- "node_modules/bidi-js": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz",
- "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "require-from-string": "^2.0.2"
- }
- },
"node_modules/bl": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
@@ -6253,44 +5541,6 @@
"node": ">=12.0.0"
}
},
- "node_modules/builder-util/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/builder-util/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/builder-util/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
"node_modules/cac": {
"version": "6.7.14",
"resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
@@ -6302,118 +5552,43 @@
}
},
"node_modules/cacache": {
- "version": "19.0.1",
- "resolved": "https://registry.npmjs.org/cacache/-/cacache-19.0.1.tgz",
- "integrity": "sha512-hdsUxulXCi5STId78vRVYEtDAjq99ICAUktLTeTYsLoTE6Z8dS0c8pWNCxwdrk9YfJeobDZc2Y186hD/5ZQgFQ==",
+ "version": "16.1.3",
+ "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.3.tgz",
+ "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==",
"dev": true,
"license": "ISC",
"dependencies": {
- "@npmcli/fs": "^4.0.0",
- "fs-minipass": "^3.0.0",
- "glob": "^10.2.2",
- "lru-cache": "^10.0.1",
- "minipass": "^7.0.3",
- "minipass-collect": "^2.0.1",
+ "@npmcli/fs": "^2.1.0",
+ "@npmcli/move-file": "^2.0.0",
+ "chownr": "^2.0.0",
+ "fs-minipass": "^2.1.0",
+ "glob": "^8.0.1",
+ "infer-owner": "^1.0.4",
+ "lru-cache": "^7.7.1",
+ "minipass": "^3.1.6",
+ "minipass-collect": "^1.0.2",
"minipass-flush": "^1.0.5",
"minipass-pipeline": "^1.2.4",
- "p-map": "^7.0.2",
- "ssri": "^12.0.0",
- "tar": "^7.4.3",
- "unique-filename": "^4.0.0"
+ "mkdirp": "^1.0.4",
+ "p-map": "^4.0.0",
+ "promise-inflight": "^1.0.1",
+ "rimraf": "^3.0.2",
+ "ssri": "^9.0.0",
+ "tar": "^6.1.11",
+ "unique-filename": "^2.0.0"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
- }
- },
- "node_modules/cacache/node_modules/brace-expansion": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
- "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
- "node_modules/cacache/node_modules/chownr": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz",
- "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": ">=18"
- }
- },
- "node_modules/cacache/node_modules/glob": {
- "version": "10.5.0",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
- "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^3.1.2",
- "minimatch": "^9.0.4",
- "minipass": "^7.1.2",
- "package-json-from-dist": "^1.0.0",
- "path-scurry": "^1.11.1"
- },
- "bin": {
- "glob": "dist/esm/bin.mjs"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/cacache/node_modules/lru-cache": {
- "version": "10.4.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
- "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/cacache/node_modules/minimatch": {
- "version": "9.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
- "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+ "version": "7.18.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
+ "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
"dev": true,
"license": "ISC",
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/cacache/node_modules/tar": {
- "version": "7.5.2",
- "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz",
- "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "dependencies": {
- "@isaacs/fs-minipass": "^4.0.0",
- "chownr": "^3.0.0",
- "minipass": "^7.1.2",
- "minizlib": "^3.1.0",
- "yallist": "^5.0.0"
- },
- "engines": {
- "node": ">=18"
- }
- },
- "node_modules/cacache/node_modules/yallist": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz",
- "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==",
- "dev": true,
- "license": "BlueOak-1.0.0",
"engines": {
- "node": ">=18"
+ "node": ">=12"
}
},
"node_modules/cacheable-lookup": {
@@ -6506,9 +5681,9 @@
}
},
"node_modules/caniuse-lite": {
- "version": "1.0.30001762",
- "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001762.tgz",
- "integrity": "sha512-PxZwGNvH7Ak8WX5iXzoK1KPZttBXNPuaOvI2ZYU7NrlM+d9Ov+TUvlLOBNGzVXAntMSMMlJPd+jY6ovrVjSmUw==",
+ "version": "1.0.30001760",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001760.tgz",
+ "integrity": "sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw==",
"dev": true,
"funding": [
{
@@ -6537,9 +5712,9 @@
}
},
"node_modules/chai": {
- "version": "6.2.2",
- "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz",
- "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==",
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.1.tgz",
+ "integrity": "sha512-p4Z49OGG5W/WBCPSS/dH3jQ73kD6tiMmUM+bckNK6Jr5JHMG3k9bg/BvKR8lKmtVBKmOiuVaV2ws8s9oSbwysg==",
"dev": true,
"license": "MIT",
"engines": {
@@ -6674,19 +5849,16 @@
}
},
"node_modules/cli-cursor": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz",
- "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==",
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
+ "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "restore-cursor": "^5.0.0"
+ "restore-cursor": "^3.1.0"
},
"engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
+ "node": ">=8"
}
},
"node_modules/cli-spinners": {
@@ -6735,37 +5907,6 @@
"node": ">=12"
}
},
- "node_modules/cliui/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/cliui/node_modules/wrap-ansi": {
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
- "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-styles": "^4.0.0",
- "string-width": "^4.1.0",
- "strip-ansi": "^6.0.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
"node_modules/clone": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz",
@@ -6933,6 +6074,16 @@
"url": "https://github.com/sponsors/isaacs"
}
},
+ "node_modules/config-file-ts/node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
"node_modules/convert-source-map": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
@@ -6968,24 +6119,6 @@
"optional": true,
"peer": true
},
- "node_modules/cross-env": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-10.1.0.tgz",
- "integrity": "sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@epic-web/invariant": "^1.0.0",
- "cross-spawn": "^7.0.6"
- },
- "bin": {
- "cross-env": "dist/bin/cross-env.js",
- "cross-env-shell": "dist/bin/cross-env-shell.js"
- },
- "engines": {
- "node": ">=20"
- }
- },
"node_modules/cross-spawn": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
@@ -7001,19 +6134,12 @@
"node": ">= 8"
}
},
- "node_modules/css-tree": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz",
- "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==",
+ "node_modules/css.escape": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz",
+ "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==",
"dev": true,
- "license": "MIT",
- "dependencies": {
- "mdn-data": "2.12.2",
- "source-map-js": "^1.0.1"
- },
- "engines": {
- "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0"
- }
+ "license": "MIT"
},
"node_modules/cssesc": {
"version": "3.0.0",
@@ -7028,29 +6154,17 @@
}
},
"node_modules/cssstyle": {
- "version": "5.3.6",
- "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.6.tgz",
- "integrity": "sha512-legscpSpgSAeGEe0TNcai97DKt9Vd9AsAdOL7Uoetb52Ar/8eJm3LIa39qpv8wWzLFlNG4vVvppQM+teaMPj3A==",
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz",
+ "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@asamuzakjp/css-color": "^4.1.1",
- "@csstools/css-syntax-patches-for-csstree": "^1.0.21",
- "css-tree": "^3.1.0",
- "lru-cache": "^11.2.4"
+ "@asamuzakjp/css-color": "^3.2.0",
+ "rrweb-cssom": "^0.8.0"
},
"engines": {
- "node": ">=20"
- }
- },
- "node_modules/cssstyle/node_modules/lru-cache": {
- "version": "11.2.4",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz",
- "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": "20 || >=22"
+ "node": ">=18"
}
},
"node_modules/csstype": {
@@ -7060,17 +6174,17 @@
"license": "MIT"
},
"node_modules/data-urls": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz",
- "integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==",
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz",
+ "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==",
"dev": true,
"license": "MIT",
"dependencies": {
"whatwg-mimetype": "^4.0.0",
- "whatwg-url": "^15.0.0"
+ "whatwg-url": "^14.0.0"
},
"engines": {
- "node": ">=20"
+ "node": ">=18"
}
},
"node_modules/data-view-buffer": {
@@ -7336,63 +6450,25 @@
"brace-expansion": "^1.1.7"
},
"engines": {
- "node": "*"
- }
- },
- "node_modules/dmg-builder": {
- "version": "26.0.12",
- "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz",
- "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "app-builder-lib": "26.0.12",
- "builder-util": "26.0.11",
- "builder-util-runtime": "9.3.1",
- "fs-extra": "^10.1.0",
- "iconv-lite": "^0.6.2",
- "js-yaml": "^4.1.0"
- },
- "optionalDependencies": {
- "dmg-license": "^1.0.11"
- }
- },
- "node_modules/dmg-builder/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
+ "node": "*"
}
},
- "node_modules/dmg-builder/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
+ "node_modules/dmg-builder": {
+ "version": "26.0.12",
+ "resolved": "https://registry.npmjs.org/dmg-builder/-/dmg-builder-26.0.12.tgz",
+ "integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==",
"dev": true,
"license": "MIT",
"dependencies": {
- "universalify": "^2.0.0"
+ "app-builder-lib": "26.0.12",
+ "builder-util": "26.0.11",
+ "builder-util-runtime": "9.3.1",
+ "fs-extra": "^10.1.0",
+ "iconv-lite": "^0.6.2",
+ "js-yaml": "^4.1.0"
},
"optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/dmg-builder/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
+ "dmg-license": "^1.0.11"
}
},
"node_modules/dmg-license": {
@@ -7568,44 +6644,6 @@
"electron-winstaller": "5.4.0"
}
},
- "node_modules/electron-builder/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/electron-builder/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/electron-builder/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
"node_modules/electron-log": {
"version": "5.4.3",
"resolved": "https://registry.npmjs.org/electron-log/-/electron-log-5.4.3.tgz",
@@ -7632,44 +6670,6 @@
"mime": "^2.5.2"
}
},
- "node_modules/electron-publish/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/electron-publish/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/electron-publish/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
"node_modules/electron-to-chromium": {
"version": "1.5.267",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz",
@@ -7693,41 +6693,6 @@
"tiny-typed-emitter": "^2.1.0"
}
},
- "node_modules/electron-updater/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
- "license": "MIT",
- "dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/electron-updater/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
- "license": "MIT",
- "dependencies": {
- "universalify": "^2.0.0"
- },
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/electron-updater/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "license": "MIT",
- "engines": {
- "node": ">= 10.0.0"
- }
- },
"node_modules/electron-vite": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/electron-vite/-/electron-vite-5.0.0.tgz",
@@ -7796,6 +6761,28 @@
"node": ">=6 <7 || >=8"
}
},
+ "node_modules/electron-winstaller/node_modules/jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/electron-winstaller/node_modules/universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
"node_modules/electron/node_modules/@types/node": {
"version": "22.19.3",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz",
@@ -8353,9 +7340,9 @@
}
},
"node_modules/esquery": {
- "version": "1.7.0",
- "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz",
- "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==",
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz",
+ "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
@@ -8511,6 +7498,24 @@
"pend": "~1.2.0"
}
},
+ "node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
+ }
+ },
"node_modules/file-entry-cache": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz",
@@ -8641,6 +7646,19 @@
"url": "https://github.com/sponsors/isaacs"
}
},
+ "node_modules/foreground-child/node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
"node_modules/form-data": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
@@ -8700,31 +7718,30 @@
}
},
"node_modules/fs-extra": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
- "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
- "dev": true,
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
"license": "MIT",
"dependencies": {
"graceful-fs": "^4.2.0",
- "jsonfile": "^4.0.0",
- "universalify": "^0.1.0"
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
},
"engines": {
- "node": ">=6 <7 || >=8"
+ "node": ">=12"
}
},
"node_modules/fs-minipass": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz",
- "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==",
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
+ "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==",
"dev": true,
"license": "ISC",
"dependencies": {
- "minipass": "^7.0.3"
+ "minipass": "^3.0.0"
},
"engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
+ "node": ">= 8"
}
},
"node_modules/fs.realpath": {
@@ -8916,9 +7933,9 @@
}
},
"node_modules/glob": {
- "version": "7.2.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
- "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
+ "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
"deprecated": "Glob versions prior to v9 are no longer supported",
"dev": true,
"license": "ISC",
@@ -8926,12 +7943,11 @@
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
- "minimatch": "^3.1.1",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
+ "minimatch": "^5.0.1",
+ "once": "^1.3.0"
},
"engines": {
- "node": "*"
+ "node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -8950,17 +7966,27 @@
"node": ">=10.13.0"
}
},
+ "node_modules/glob/node_modules/brace-expansion": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0"
+ }
+ },
"node_modules/glob/node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "version": "5.1.6",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
+ "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
"dev": true,
"license": "ISC",
"dependencies": {
- "brace-expansion": "^1.1.7"
+ "brace-expansion": "^2.0.1"
},
"engines": {
- "node": "*"
+ "node": ">=10"
}
},
"node_modules/global-agent": {
@@ -8983,9 +8009,9 @@
}
},
"node_modules/globals": {
- "version": "17.0.0",
- "resolved": "https://registry.npmjs.org/globals/-/globals-17.0.0.tgz",
- "integrity": "sha512-gv5BeD2EssA793rlFWVPMMCqefTlpusw6/2TbAVMy0FzcG8wKJn4O+NqJ4+XWmmwrayJgw5TzrmWjFgmz1XPqw==",
+ "version": "16.5.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz",
+ "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -9242,16 +8268,16 @@
"license": "ISC"
},
"node_modules/html-encoding-sniffer": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz",
- "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz",
+ "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@exodus/bytes": "^1.6.0"
+ "whatwg-encoding": "^3.1.1"
},
"engines": {
- "node": "^20.19.0 || ^22.12.0 || >=24.0.0"
+ "node": ">=18"
}
},
"node_modules/html-parse-stringify": {
@@ -10152,35 +9178,35 @@
}
},
"node_modules/jsdom": {
- "version": "27.4.0",
- "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.4.0.tgz",
- "integrity": "sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ==",
+ "version": "26.1.0",
+ "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-26.1.0.tgz",
+ "integrity": "sha512-Cvc9WUhxSMEo4McES3P7oK3QaXldCfNWp7pl2NNeiIFlCoLr3kfq9kb1fxftiwk1FLV7CvpvDfonxtzUDeSOPg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@acemir/cssom": "^0.9.28",
- "@asamuzakjp/dom-selector": "^6.7.6",
- "@exodus/bytes": "^1.6.0",
- "cssstyle": "^5.3.4",
- "data-urls": "^6.0.0",
- "decimal.js": "^10.6.0",
- "html-encoding-sniffer": "^6.0.0",
+ "cssstyle": "^4.2.1",
+ "data-urls": "^5.0.0",
+ "decimal.js": "^10.5.0",
+ "html-encoding-sniffer": "^4.0.0",
"http-proxy-agent": "^7.0.2",
"https-proxy-agent": "^7.0.6",
"is-potential-custom-element-name": "^1.0.1",
- "parse5": "^8.0.0",
+ "nwsapi": "^2.2.16",
+ "parse5": "^7.2.1",
+ "rrweb-cssom": "^0.8.0",
"saxes": "^6.0.0",
"symbol-tree": "^3.2.4",
- "tough-cookie": "^6.0.0",
+ "tough-cookie": "^5.1.1",
"w3c-xmlserializer": "^5.0.0",
- "webidl-conversions": "^8.0.0",
+ "webidl-conversions": "^7.0.0",
+ "whatwg-encoding": "^3.1.1",
"whatwg-mimetype": "^4.0.0",
- "whatwg-url": "^15.1.0",
- "ws": "^8.18.3",
+ "whatwg-url": "^14.1.1",
+ "ws": "^8.18.0",
"xml-name-validator": "^5.0.0"
},
"engines": {
- "node": "^20.19.0 || ^22.12.0 || >=24.0.0"
+ "node": ">=18"
},
"peerDependencies": {
"canvas": "^3.0.0"
@@ -10247,11 +9273,13 @@
}
},
"node_modules/jsonfile": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
- "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==",
- "dev": true,
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
+ "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
"license": "MIT",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
"optionalDependencies": {
"graceful-fs": "^4.1.6"
}
@@ -10616,6 +9644,19 @@
"node": ">=20.0.0"
}
},
+ "node_modules/listr2/node_modules/ansi-regex": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
+ "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
"node_modules/listr2/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
@@ -10646,6 +9687,13 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/listr2/node_modules/emoji-regex": {
+ "version": "10.6.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
+ "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/listr2/node_modules/is-fullwidth-code-point": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz",
@@ -10696,6 +9744,58 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/listr2/node_modules/strip-ansi": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
+ "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/listr2/node_modules/wrap-ansi": {
+ "version": "9.0.2",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
+ "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^6.2.1",
+ "string-width": "^7.0.0",
+ "strip-ansi": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/listr2/node_modules/wrap-ansi/node_modules/string-width": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
+ "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^10.3.0",
+ "get-east-asian-width": "^1.0.0",
+ "strip-ansi": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
@@ -10776,6 +9876,19 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/log-update/node_modules/ansi-regex": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
+ "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
"node_modules/log-update/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
@@ -10789,6 +9902,29 @@
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
+ "node_modules/log-update/node_modules/cli-cursor": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz",
+ "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "restore-cursor": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/log-update/node_modules/emoji-regex": {
+ "version": "10.6.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
+ "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/log-update/node_modules/is-fullwidth-code-point": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz",
@@ -10805,6 +9941,52 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/log-update/node_modules/onetime": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz",
+ "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mimic-function": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/log-update/node_modules/restore-cursor": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz",
+ "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "onetime": "^7.0.0",
+ "signal-exit": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/log-update/node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
"node_modules/log-update/node_modules/slice-ansi": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz",
@@ -10822,6 +10004,58 @@
"url": "https://github.com/chalk/slice-ansi?sponsor=1"
}
},
+ "node_modules/log-update/node_modules/string-width": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
+ "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^10.3.0",
+ "get-east-asian-width": "^1.0.0",
+ "strip-ansi": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/log-update/node_modules/strip-ansi": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
+ "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/log-update/node_modules/wrap-ansi": {
+ "version": "9.0.2",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
+ "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^6.2.1",
+ "string-width": "^7.0.0",
+ "strip-ansi": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
"node_modules/longest-streak": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
@@ -10866,9 +10100,9 @@
}
},
"node_modules/lucide-react": {
- "version": "0.562.0",
- "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz",
- "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==",
+ "version": "0.560.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.560.0.tgz",
+ "integrity": "sha512-NwKoUA/aBShsdL8WE5lukV2F/tjHzQRlonQs7fkNGI1sCT0Ay4a9Ap3ST2clUUkcY+9eQ0pBe2hybTQd2fmyDA==",
"license": "ISC",
"peerDependencies": {
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
@@ -10896,26 +10130,83 @@
}
},
"node_modules/make-fetch-happen": {
- "version": "14.0.3",
- "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz",
- "integrity": "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==",
+ "version": "10.2.1",
+ "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz",
+ "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==",
"dev": true,
"license": "ISC",
"dependencies": {
- "@npmcli/agent": "^3.0.0",
- "cacache": "^19.0.1",
- "http-cache-semantics": "^4.1.1",
- "minipass": "^7.0.2",
- "minipass-fetch": "^4.0.0",
+ "agentkeepalive": "^4.2.1",
+ "cacache": "^16.1.0",
+ "http-cache-semantics": "^4.1.0",
+ "http-proxy-agent": "^5.0.0",
+ "https-proxy-agent": "^5.0.0",
+ "is-lambda": "^1.0.1",
+ "lru-cache": "^7.7.1",
+ "minipass": "^3.1.6",
+ "minipass-collect": "^1.0.2",
+ "minipass-fetch": "^2.0.3",
"minipass-flush": "^1.0.5",
"minipass-pipeline": "^1.2.4",
- "negotiator": "^1.0.0",
- "proc-log": "^5.0.0",
+ "negotiator": "^0.6.3",
"promise-retry": "^2.0.1",
- "ssri": "^12.0.0"
+ "socks-proxy-agent": "^7.0.0",
+ "ssri": "^9.0.0"
+ },
+ "engines": {
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
+ }
+ },
+ "node_modules/make-fetch-happen/node_modules/agent-base": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+ "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 6.0.0"
+ }
+ },
+ "node_modules/make-fetch-happen/node_modules/http-proxy-agent": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
+ "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@tootallnate/once": "2",
+ "agent-base": "6",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/make-fetch-happen/node_modules/https-proxy-agent": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
+ "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "6",
+ "debug": "4"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": ">= 6"
+ }
+ },
+ "node_modules/make-fetch-happen/node_modules/lru-cache": {
+ "version": "7.18.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
+ "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
}
},
"node_modules/markdown-table": {
@@ -11234,13 +10525,6 @@
"url": "https://opencollective.com/unified"
}
},
- "node_modules/mdn-data": {
- "version": "2.12.2",
- "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz",
- "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==",
- "dev": true,
- "license": "CC0-1.0"
- },
"node_modules/micromark": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz",
@@ -11818,6 +11102,19 @@
"node": ">=8.6"
}
},
+ "node_modules/micromatch/node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
"node_modules/mime": {
"version": "2.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz",
@@ -11887,6 +11184,16 @@
"node": ">=4"
}
},
+ "node_modules/min-indent": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
+ "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
"node_modules/minimatch": {
"version": "10.1.1",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
@@ -11914,41 +11221,44 @@
}
},
"node_modules/minipass": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
- "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "version": "3.3.6",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
+ "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
"dev": true,
"license": "ISC",
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
"engines": {
- "node": ">=16 || 14 >=14.17"
+ "node": ">=8"
}
},
"node_modules/minipass-collect": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz",
- "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==",
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz",
+ "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==",
"dev": true,
"license": "ISC",
"dependencies": {
- "minipass": "^7.0.3"
+ "minipass": "^3.0.0"
},
"engines": {
- "node": ">=16 || 14 >=14.17"
+ "node": ">= 8"
}
},
"node_modules/minipass-fetch": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-4.0.1.tgz",
- "integrity": "sha512-j7U11C5HXigVuutxebFadoYBbd7VSdZWggSe64NVdvWNBqGAiXPL2QVCehjmw7lY1oF9gOllYbORh+hiNgfPgQ==",
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.2.tgz",
+ "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "minipass": "^7.0.3",
+ "minipass": "^3.1.6",
"minipass-sized": "^1.0.3",
- "minizlib": "^3.0.1"
+ "minizlib": "^2.1.2"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
},
"optionalDependencies": {
"encoding": "^0.1.13"
@@ -11967,26 +11277,6 @@
"node": ">= 8"
}
},
- "node_modules/minipass-flush/node_modules/minipass": {
- "version": "3.3.6",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
- "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/minipass-flush/node_modules/yallist": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
- "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/minipass-pipeline": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz",
@@ -12000,26 +11290,6 @@
"node": ">=8"
}
},
- "node_modules/minipass-pipeline/node_modules/minipass": {
- "version": "3.3.6",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
- "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/minipass-pipeline/node_modules/yallist": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
- "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/minipass-sized": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz",
@@ -12033,20 +11303,7 @@
"node": ">=8"
}
},
- "node_modules/minipass-sized/node_modules/minipass": {
- "version": "3.3.6",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
- "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/minipass-sized/node_modules/yallist": {
+ "node_modules/minipass/node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
@@ -12054,18 +11311,26 @@
"license": "ISC"
},
"node_modules/minizlib": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz",
- "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==",
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz",
+ "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "minipass": "^7.1.2"
+ "minipass": "^3.0.0",
+ "yallist": "^4.0.0"
},
"engines": {
- "node": ">= 18"
+ "node": ">= 8"
}
},
+ "node_modules/minizlib/node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "dev": true,
+ "license": "ISC"
+ },
"node_modules/mkdirp": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
@@ -12166,9 +11431,9 @@
"license": "MIT"
},
"node_modules/negotiator": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
- "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
+ "version": "0.6.4",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz",
+ "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==",
"dev": true,
"license": "MIT",
"engines": {
@@ -12176,16 +11441,16 @@
}
},
"node_modules/node-abi": {
- "version": "4.24.0",
- "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-4.24.0.tgz",
- "integrity": "sha512-u2EC1CeNe25uVtX3EZbdQ275c74zdZmmpzrHEQh2aIYqoVjlglfUpOX9YY85x1nlBydEKDVaSmMNhR7N82Qj8A==",
+ "version": "3.85.0",
+ "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz",
+ "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "semver": "^7.6.3"
+ "semver": "^7.3.5"
},
"engines": {
- "node": ">=22.12.0"
+ "node": ">=10"
}
},
"node_modules/node-addon-api": {
@@ -12206,94 +11471,6 @@
"semver": "^7.3.5"
}
},
- "node_modules/node-gyp": {
- "version": "11.5.0",
- "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-11.5.0.tgz",
- "integrity": "sha512-ra7Kvlhxn5V9Slyus0ygMa2h+UqExPqUIkfk7Pc8QTLT956JLSy51uWFwHtIYy0vI8cB4BDhc/S03+880My/LQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "env-paths": "^2.2.0",
- "exponential-backoff": "^3.1.1",
- "graceful-fs": "^4.2.6",
- "make-fetch-happen": "^14.0.3",
- "nopt": "^8.0.0",
- "proc-log": "^5.0.0",
- "semver": "^7.3.5",
- "tar": "^7.4.3",
- "tinyglobby": "^0.2.12",
- "which": "^5.0.0"
- },
- "bin": {
- "node-gyp": "bin/node-gyp.js"
- },
- "engines": {
- "node": "^18.17.0 || >=20.5.0"
- }
- },
- "node_modules/node-gyp/node_modules/chownr": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz",
- "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": ">=18"
- }
- },
- "node_modules/node-gyp/node_modules/isexe": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz",
- "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=16"
- }
- },
- "node_modules/node-gyp/node_modules/tar": {
- "version": "7.5.2",
- "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz",
- "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "dependencies": {
- "@isaacs/fs-minipass": "^4.0.0",
- "chownr": "^3.0.0",
- "minipass": "^7.1.2",
- "minizlib": "^3.1.0",
- "yallist": "^5.0.0"
- },
- "engines": {
- "node": ">=18"
- }
- },
- "node_modules/node-gyp/node_modules/which": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/which/-/which-5.0.0.tgz",
- "integrity": "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "isexe": "^3.1.1"
- },
- "bin": {
- "node-which": "bin/which.js"
- },
- "engines": {
- "node": "^18.17.0 || >=20.5.0"
- }
- },
- "node_modules/node-gyp/node_modules/yallist": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz",
- "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": ">=18"
- }
- },
"node_modules/node-releases": {
"version": "2.0.27",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
@@ -12302,19 +11479,19 @@
"license": "MIT"
},
"node_modules/nopt": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz",
- "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==",
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz",
+ "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==",
"dev": true,
"license": "ISC",
"dependencies": {
- "abbrev": "^3.0.0"
+ "abbrev": "^1.0.0"
},
"bin": {
"nopt": "bin/nopt.js"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/normalize-url": {
@@ -12330,6 +11507,13 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/nwsapi": {
+ "version": "2.2.23",
+ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.23.tgz",
+ "integrity": "sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
@@ -12460,16 +11644,16 @@
}
},
"node_modules/onetime": {
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz",
- "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==",
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "mimic-function": "^5.0.0"
+ "mimic-fn": "^2.1.0"
},
"engines": {
- "node": ">=18"
+ "node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
@@ -12517,69 +11701,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/ora/node_modules/cli-cursor": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
- "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "restore-cursor": "^3.1.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/ora/node_modules/onetime": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
- "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "mimic-fn": "^2.1.0"
- },
- "engines": {
- "node": ">=6"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/ora/node_modules/restore-cursor": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
- "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "onetime": "^5.1.0",
- "signal-exit": "^3.0.2"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/ora/node_modules/signal-exit": {
- "version": "3.0.7",
- "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
- "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/ora/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/own-keys": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz",
@@ -12641,13 +11762,16 @@
}
},
"node_modules/p-map": {
- "version": "7.0.4",
- "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz",
- "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
+ "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
"dev": true,
"license": "MIT",
+ "dependencies": {
+ "aggregate-error": "^3.0.0"
+ },
"engines": {
- "node": ">=18"
+ "node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
@@ -12699,9 +11823,9 @@
"license": "MIT"
},
"node_modules/parse5": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz",
- "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==",
+ "version": "7.3.0",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz",
+ "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -12772,6 +11896,16 @@
"dev": true,
"license": "ISC"
},
+ "node_modules/path-scurry/node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
"node_modules/pathe": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
@@ -12809,13 +11943,13 @@
"license": "ISC"
},
"node_modules/picomatch": {
- "version": "2.3.1",
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
- "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"engines": {
- "node": ">=8.6"
+ "node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
@@ -13010,14 +12144,22 @@
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
+ "node_modules/pretty-format/node_modules/react-is": {
+ "version": "17.0.2",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz",
+ "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true
+ },
"node_modules/proc-log": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz",
- "integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==",
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-2.0.1.tgz",
+ "integrity": "sha512-Kcmo2FhfDTXdcbfDH76N7uBYHINxc/8GW7UAVuVP9I+Va3uHSerrnKV6dLooga/gh7GlgzuCCr/eoldnL1muGw==",
"dev": true,
"license": "ISC",
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/progress": {
@@ -13063,13 +12205,6 @@
"react-is": "^16.13.1"
}
},
- "node_modules/prop-types/node_modules/react-is": {
- "version": "16.13.1",
- "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
- "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/property-information": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
@@ -13136,12 +12271,12 @@
}
},
"node_modules/react-i18next": {
- "version": "16.5.1",
- "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.1.tgz",
- "integrity": "sha512-Hks6UIRZWW4c+qDAnx1csVsCGYeIR4MoBGQgJ+NUoNnO6qLxXuf8zu0xdcinyXUORgGzCdRsexxO1Xzv3sTdnw==",
+ "version": "16.5.0",
+ "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.5.0.tgz",
+ "integrity": "sha512-IMpPTyCTKxEj8klCrLKUTIUa8uYTd851+jcu2fJuUB9Agkk9Qq8asw4omyeHVnOXHrLgQJGTm5zTvn8HpaPiqw==",
"license": "MIT",
"dependencies": {
- "@babel/runtime": "^7.28.4",
+ "@babel/runtime": "^7.27.6",
"html-parse-stringify": "^3.0.1",
"use-sync-external-store": "^1.6.0"
},
@@ -13163,12 +12298,11 @@
}
},
"node_modules/react-is": {
- "version": "17.0.2",
- "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz",
- "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==",
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
"dev": true,
- "license": "MIT",
- "peer": true
+ "license": "MIT"
},
"node_modules/react-markdown": {
"version": "10.1.0",
@@ -13255,13 +12389,13 @@
}
},
"node_modules/react-resizable-panels": {
- "version": "4.2.0",
- "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-4.2.0.tgz",
- "integrity": "sha512-X/WbnyT/bgx09KEGvtJvaTr3axRrcBGcJdELIoGXZipCxc2hPwFsH/pfpVgwNVq5LpQxF/E5pPXGTQdjBnidPw==",
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/react-resizable-panels/-/react-resizable-panels-3.0.6.tgz",
+ "integrity": "sha512-b3qKHQ3MLqOgSS+FRYKapNkJZf5EQzuf6+RLiq1/IlTHw99YrZ2NJZLk4hQIzTnnIkRg2LUqyVinu6YWWpUYew==",
"license": "MIT",
"peerDependencies": {
- "react": "^18.0.0 || ^19.0.0",
- "react-dom": "^18.0.0 || ^19.0.0"
+ "react": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc",
+ "react-dom": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc"
}
},
"node_modules/react-style-singleton": {
@@ -13327,6 +12461,20 @@
"url": "https://paulmillr.com/funding/"
}
},
+ "node_modules/redent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz",
+ "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "indent-string": "^4.0.0",
+ "strip-indent": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
"node_modules/reflect.getprototypeof": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz",
@@ -13447,16 +12595,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/require-from-string": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
- "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/resedit": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/resedit/-/resedit-1.7.2.tgz",
@@ -13524,20 +12662,17 @@
}
},
"node_modules/restore-cursor": {
- "version": "5.1.0",
- "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz",
- "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==",
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
+ "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "onetime": "^7.0.0",
- "signal-exit": "^4.1.0"
+ "onetime": "^5.1.0",
+ "signal-exit": "^3.0.2"
},
"engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
+ "node": ">=8"
}
},
"node_modules/retry": {
@@ -13558,18 +12693,55 @@
"license": "MIT"
},
"node_modules/rimraf": {
- "version": "2.6.3",
- "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz",
- "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==",
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"deprecated": "Rimraf versions prior to v4 are no longer supported",
"dev": true,
"license": "ISC",
- "peer": true,
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/rimraf/node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/rimraf/node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
}
},
"node_modules/roarr": {
@@ -13592,9 +12764,9 @@
}
},
"node_modules/rollup": {
- "version": "4.54.0",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.54.0.tgz",
- "integrity": "sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==",
+ "version": "4.53.4",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.4.tgz",
+ "integrity": "sha512-YpXaaArg0MvrnJpvduEDYIp7uGOqKXbH9NsHGQ6SxKCOsNAjZF018MmxefFUulVP2KLtiGw1UvZbr+/ekjvlDg==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -13608,31 +12780,38 @@
"npm": ">=8.0.0"
},
"optionalDependencies": {
- "@rollup/rollup-android-arm-eabi": "4.54.0",
- "@rollup/rollup-android-arm64": "4.54.0",
- "@rollup/rollup-darwin-arm64": "4.54.0",
- "@rollup/rollup-darwin-x64": "4.54.0",
- "@rollup/rollup-freebsd-arm64": "4.54.0",
- "@rollup/rollup-freebsd-x64": "4.54.0",
- "@rollup/rollup-linux-arm-gnueabihf": "4.54.0",
- "@rollup/rollup-linux-arm-musleabihf": "4.54.0",
- "@rollup/rollup-linux-arm64-gnu": "4.54.0",
- "@rollup/rollup-linux-arm64-musl": "4.54.0",
- "@rollup/rollup-linux-loong64-gnu": "4.54.0",
- "@rollup/rollup-linux-ppc64-gnu": "4.54.0",
- "@rollup/rollup-linux-riscv64-gnu": "4.54.0",
- "@rollup/rollup-linux-riscv64-musl": "4.54.0",
- "@rollup/rollup-linux-s390x-gnu": "4.54.0",
- "@rollup/rollup-linux-x64-gnu": "4.54.0",
- "@rollup/rollup-linux-x64-musl": "4.54.0",
- "@rollup/rollup-openharmony-arm64": "4.54.0",
- "@rollup/rollup-win32-arm64-msvc": "4.54.0",
- "@rollup/rollup-win32-ia32-msvc": "4.54.0",
- "@rollup/rollup-win32-x64-gnu": "4.54.0",
- "@rollup/rollup-win32-x64-msvc": "4.54.0",
+ "@rollup/rollup-android-arm-eabi": "4.53.4",
+ "@rollup/rollup-android-arm64": "4.53.4",
+ "@rollup/rollup-darwin-arm64": "4.53.4",
+ "@rollup/rollup-darwin-x64": "4.53.4",
+ "@rollup/rollup-freebsd-arm64": "4.53.4",
+ "@rollup/rollup-freebsd-x64": "4.53.4",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.53.4",
+ "@rollup/rollup-linux-arm-musleabihf": "4.53.4",
+ "@rollup/rollup-linux-arm64-gnu": "4.53.4",
+ "@rollup/rollup-linux-arm64-musl": "4.53.4",
+ "@rollup/rollup-linux-loong64-gnu": "4.53.4",
+ "@rollup/rollup-linux-ppc64-gnu": "4.53.4",
+ "@rollup/rollup-linux-riscv64-gnu": "4.53.4",
+ "@rollup/rollup-linux-riscv64-musl": "4.53.4",
+ "@rollup/rollup-linux-s390x-gnu": "4.53.4",
+ "@rollup/rollup-linux-x64-gnu": "4.53.4",
+ "@rollup/rollup-linux-x64-musl": "4.53.4",
+ "@rollup/rollup-openharmony-arm64": "4.53.4",
+ "@rollup/rollup-win32-arm64-msvc": "4.53.4",
+ "@rollup/rollup-win32-ia32-msvc": "4.53.4",
+ "@rollup/rollup-win32-x64-gnu": "4.53.4",
+ "@rollup/rollup-win32-x64-msvc": "4.53.4",
"fsevents": "~2.3.2"
}
},
+ "node_modules/rrweb-cssom": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz",
+ "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/safe-array-concat": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz",
@@ -13944,17 +13123,11 @@
"license": "ISC"
},
"node_modules/signal-exit": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
- "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
"dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
+ "license": "ISC"
},
"node_modules/simple-update-notifier": {
"version": "2.0.0",
@@ -14012,18 +13185,31 @@
}
},
"node_modules/socks-proxy-agent": {
- "version": "8.0.5",
- "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz",
- "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz",
+ "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==",
"dev": true,
"license": "MIT",
"dependencies": {
- "agent-base": "^7.1.2",
- "debug": "^4.3.4",
- "socks": "^2.8.3"
+ "agent-base": "^6.0.2",
+ "debug": "^4.3.3",
+ "socks": "^2.6.2"
},
"engines": {
- "node": ">= 14"
+ "node": ">= 10"
+ }
+ },
+ "node_modules/socks-proxy-agent/node_modules/agent-base": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+ "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 6.0.0"
}
},
"node_modules/source-map": {
@@ -14076,16 +13262,16 @@
"optional": true
},
"node_modules/ssri": {
- "version": "12.0.0",
- "resolved": "https://registry.npmjs.org/ssri/-/ssri-12.0.0.tgz",
- "integrity": "sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==",
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz",
+ "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==",
"dev": true,
"license": "ISC",
"dependencies": {
- "minipass": "^7.0.3"
+ "minipass": "^3.1.1"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/stackback": {
@@ -14177,32 +13363,6 @@
"node": ">=8"
}
},
- "node_modules/string-width-cjs/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/string-width/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/string.prototype.matchall": {
"version": "4.0.12",
"resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz",
@@ -14316,19 +13476,16 @@
}
},
"node_modules/strip-ansi": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
- "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"license": "MIT",
"dependencies": {
- "ansi-regex": "^6.0.1"
+ "ansi-regex": "^5.0.1"
},
"engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ "node": ">=8"
}
},
"node_modules/strip-ansi-cjs": {
@@ -14345,17 +13502,17 @@
"node": ">=8"
}
},
- "node_modules/strip-ansi/node_modules/ansi-regex": {
- "version": "6.2.2",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
- "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
+ "node_modules/strip-indent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz",
+ "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==",
"dev": true,
"license": "MIT",
- "engines": {
- "node": ">=12"
+ "dependencies": {
+ "min-indent": "^1.0.0"
},
- "funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ "engines": {
+ "node": ">=8"
}
},
"node_modules/strip-json-comments": {
@@ -14470,78 +13627,25 @@
"resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz",
"integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==",
"dev": true,
- "license": "ISC",
- "dependencies": {
- "chownr": "^2.0.0",
- "fs-minipass": "^2.0.0",
- "minipass": "^5.0.0",
- "minizlib": "^2.1.1",
- "mkdirp": "^1.0.3",
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/tar/node_modules/fs-minipass": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
- "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "minipass": "^3.0.0"
- },
- "engines": {
- "node": ">= 8"
- }
- },
- "node_modules/tar/node_modules/fs-minipass/node_modules/minipass": {
- "version": "3.3.6",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
- "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/tar/node_modules/minipass": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz",
- "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/tar/node_modules/minizlib": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz",
- "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==",
- "dev": true,
- "license": "MIT",
+ "license": "ISC",
"dependencies": {
- "minipass": "^3.0.0",
+ "chownr": "^2.0.0",
+ "fs-minipass": "^2.0.0",
+ "minipass": "^5.0.0",
+ "minizlib": "^2.1.1",
+ "mkdirp": "^1.0.3",
"yallist": "^4.0.0"
},
"engines": {
- "node": ">= 8"
+ "node": ">=10"
}
},
- "node_modules/tar/node_modules/minizlib/node_modules/minipass": {
- "version": "3.3.6",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
- "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
+ "node_modules/tar/node_modules/minipass": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz",
+ "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==",
"dev": true,
"license": "ISC",
- "dependencies": {
- "yallist": "^4.0.0"
- },
"engines": {
"node": ">=8"
}
@@ -14579,42 +13683,41 @@
"fs-extra": "^10.0.0"
}
},
- "node_modules/temp-file/node_modules/fs-extra": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
- "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "node_modules/temp/node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
"dev": true,
- "license": "MIT",
+ "license": "ISC",
+ "peer": true,
"dependencies": {
- "graceful-fs": "^4.2.0",
- "jsonfile": "^6.0.1",
- "universalify": "^2.0.0"
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
},
"engines": {
- "node": ">=12"
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
}
},
- "node_modules/temp-file/node_modules/jsonfile": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
- "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
+ "node_modules/temp/node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dev": true,
- "license": "MIT",
+ "license": "ISC",
+ "peer": true,
"dependencies": {
- "universalify": "^2.0.0"
+ "brace-expansion": "^1.1.7"
},
- "optionalDependencies": {
- "graceful-fs": "^4.1.6"
- }
- },
- "node_modules/temp-file/node_modules/universalify": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
- "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
- "dev": true,
- "license": "MIT",
"engines": {
- "node": ">= 10.0.0"
+ "node": "*"
}
},
"node_modules/temp/node_modules/mkdirp": {
@@ -14631,6 +13734,21 @@
"mkdirp": "bin/cmd.js"
}
},
+ "node_modules/temp/node_modules/rimraf": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz",
+ "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==",
+ "deprecated": "Rimraf versions prior to v4 are no longer supported",
+ "dev": true,
+ "license": "ISC",
+ "peer": true,
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ }
+ },
"node_modules/tiny-async-pool": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/tiny-async-pool/-/tiny-async-pool-1.3.0.tgz",
@@ -14691,37 +13809,6 @@
"url": "https://github.com/sponsors/SuperchupuDev"
}
},
- "node_modules/tinyglobby/node_modules/fdir": {
- "version": "6.5.0",
- "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
- "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12.0.0"
- },
- "peerDependencies": {
- "picomatch": "^3 || ^4"
- },
- "peerDependenciesMeta": {
- "picomatch": {
- "optional": true
- }
- }
- },
- "node_modules/tinyglobby/node_modules/picomatch": {
- "version": "4.0.3",
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
- "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/jonschlinkert"
- }
- },
"node_modules/tinyrainbow": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz",
@@ -14733,22 +13820,22 @@
}
},
"node_modules/tldts": {
- "version": "7.0.19",
- "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz",
- "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==",
+ "version": "6.1.86",
+ "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.86.tgz",
+ "integrity": "sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "tldts-core": "^7.0.19"
+ "tldts-core": "^6.1.86"
},
"bin": {
"tldts": "bin/cli.js"
}
},
"node_modules/tldts-core": {
- "version": "7.0.19",
- "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz",
- "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==",
+ "version": "6.1.86",
+ "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.86.tgz",
+ "integrity": "sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==",
"dev": true,
"license": "MIT"
},
@@ -14786,29 +13873,29 @@
}
},
"node_modules/tough-cookie": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz",
- "integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==",
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.1.2.tgz",
+ "integrity": "sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
- "tldts": "^7.0.5"
+ "tldts": "^6.1.32"
},
"engines": {
"node": ">=16"
}
},
"node_modules/tr46": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz",
- "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==",
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz",
+ "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==",
"dev": true,
"license": "MIT",
"dependencies": {
"punycode": "^2.3.1"
},
"engines": {
- "node": ">=20"
+ "node": ">=18"
}
},
"node_modules/trim-lines": {
@@ -14842,9 +13929,9 @@
}
},
"node_modules/ts-api-utils": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.3.0.tgz",
- "integrity": "sha512-6eg3Y9SF7SsAvGzRHQvvc1skDAhwI4YQ32ui1scxD1Ccr0G5qIIbUBT3pFTKX8kmWIQClHobtUdNuaBgwdfdWg==",
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz",
+ "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -14980,16 +14067,16 @@
}
},
"node_modules/typescript-eslint": {
- "version": "8.51.0",
- "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.51.0.tgz",
- "integrity": "sha512-jh8ZuM5oEh2PSdyQG9YAEM1TCGuWenLSuSUhf/irbVUNW9O5FhbFVONviN2TgMTBnUmyHv7E56rYnfLZK6TkiA==",
+ "version": "8.49.0",
+ "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.49.0.tgz",
+ "integrity": "sha512-zRSVH1WXD0uXczCXw+nsdjGPUdx4dfrs5VQoHnUWmv1U3oNlAKv4FUNdLDhVUg+gYn+a5hUESqch//Rv5wVhrg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/eslint-plugin": "8.51.0",
- "@typescript-eslint/parser": "8.51.0",
- "@typescript-eslint/typescript-estree": "8.51.0",
- "@typescript-eslint/utils": "8.51.0"
+ "@typescript-eslint/eslint-plugin": "8.49.0",
+ "@typescript-eslint/parser": "8.49.0",
+ "@typescript-eslint/typescript-estree": "8.49.0",
+ "@typescript-eslint/utils": "8.49.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -15049,29 +14136,29 @@
}
},
"node_modules/unique-filename": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-4.0.0.tgz",
- "integrity": "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ==",
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-2.0.1.tgz",
+ "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==",
"dev": true,
"license": "ISC",
"dependencies": {
- "unique-slug": "^5.0.0"
+ "unique-slug": "^3.0.0"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/unique-slug": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-5.0.0.tgz",
- "integrity": "sha512-9OdaqO5kwqR+1kVgHAhsp5vPNU0hnxRa26rBFNfNgM7M6pNtgzeBn3s/xbyCQL3dcjzOatcef6UUHpB/6MaETg==",
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-3.0.0.tgz",
+ "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==",
"dev": true,
"license": "ISC",
"dependencies": {
"imurmurhash": "^0.1.4"
},
"engines": {
- "node": "^18.17.0 || >=20.5.0"
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/unist-util-is": {
@@ -15143,19 +14230,18 @@
}
},
"node_modules/universalify": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
- "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
- "dev": true,
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
+ "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
"license": "MIT",
"engines": {
- "node": ">= 4.0.0"
+ "node": ">= 10.0.0"
}
},
"node_modules/update-browserslist-db": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz",
- "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==",
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.2.tgz",
+ "integrity": "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA==",
"dev": true,
"funding": [
{
@@ -15391,9 +14477,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/aix-ppc64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz",
- "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.1.tgz",
+ "integrity": "sha512-HHB50pdsBX6k47S4u5g/CaLjqS3qwaOVE5ILsq64jyzgMhLuCuZ8rGzM9yhsAjfjkbgUPMzZEPa7DAp7yz6vuA==",
"cpu": [
"ppc64"
],
@@ -15408,9 +14494,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/android-arm": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz",
- "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.1.tgz",
+ "integrity": "sha512-kFqa6/UcaTbGm/NncN9kzVOODjhZW8e+FRdSeypWe6j33gzclHtwlANs26JrupOntlcWmB0u8+8HZo8s7thHvg==",
"cpu": [
"arm"
],
@@ -15425,9 +14511,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/android-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz",
- "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.1.tgz",
+ "integrity": "sha512-45fuKmAJpxnQWixOGCrS+ro4Uvb4Re9+UTieUY2f8AEc+t7d4AaZ6eUJ3Hva7dtrxAAWHtlEFsXFMAgNnGU9uQ==",
"cpu": [
"arm64"
],
@@ -15442,9 +14528,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/android-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz",
- "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.1.tgz",
+ "integrity": "sha512-LBEpOz0BsgMEeHgenf5aqmn/lLNTFXVfoWMUox8CtWWYK9X4jmQzWjoGoNb8lmAYml/tQ/Ysvm8q7szu7BoxRQ==",
"cpu": [
"x64"
],
@@ -15459,9 +14545,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/darwin-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz",
- "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.1.tgz",
+ "integrity": "sha512-veg7fL8eMSCVKL7IW4pxb54QERtedFDfY/ASrumK/SbFsXnRazxY4YykN/THYqFnFwJ0aVjiUrVG2PwcdAEqQQ==",
"cpu": [
"arm64"
],
@@ -15476,9 +14562,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/darwin-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz",
- "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.1.tgz",
+ "integrity": "sha512-+3ELd+nTzhfWb07Vol7EZ+5PTbJ/u74nC6iv4/lwIU99Ip5uuY6QoIf0Hn4m2HoV0qcnRivN3KSqc+FyCHjoVQ==",
"cpu": [
"x64"
],
@@ -15493,9 +14579,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/freebsd-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz",
- "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.1.tgz",
+ "integrity": "sha512-/8Rfgns4XD9XOSXlzUDepG8PX+AVWHliYlUkFI3K3GB6tqbdjYqdhcb4BKRd7C0BhZSoaCxhv8kTcBrcZWP+xg==",
"cpu": [
"arm64"
],
@@ -15510,9 +14596,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/freebsd-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz",
- "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.1.tgz",
+ "integrity": "sha512-GITpD8dK9C+r+5yRT/UKVT36h/DQLOHdwGVwwoHidlnA168oD3uxA878XloXebK4Ul3gDBBIvEdL7go9gCUFzQ==",
"cpu": [
"x64"
],
@@ -15527,9 +14613,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-arm": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz",
- "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.1.tgz",
+ "integrity": "sha512-ieMID0JRZY/ZeCrsFQ3Y3NlHNCqIhTprJfDgSB3/lv5jJZ8FX3hqPyXWhe+gvS5ARMBJ242PM+VNz/ctNj//eA==",
"cpu": [
"arm"
],
@@ -15544,9 +14630,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz",
- "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.1.tgz",
+ "integrity": "sha512-W9//kCrh/6in9rWIBdKaMtuTTzNj6jSeG/haWBADqLLa9P8O5YSRDzgD5y9QBok4AYlzS6ARHifAb75V6G670Q==",
"cpu": [
"arm64"
],
@@ -15561,9 +14647,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-ia32": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz",
- "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.1.tgz",
+ "integrity": "sha512-VIUV4z8GD8rtSVMfAj1aXFahsi/+tcoXXNYmXgzISL+KB381vbSTNdeZHHHIYqFyXcoEhu9n5cT+05tRv13rlw==",
"cpu": [
"ia32"
],
@@ -15578,9 +14664,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-loong64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz",
- "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.1.tgz",
+ "integrity": "sha512-l4rfiiJRN7sTNI//ff65zJ9z8U+k6zcCg0LALU5iEWzY+a1mVZ8iWC1k5EsNKThZ7XCQ6YWtsZ8EWYm7r1UEsg==",
"cpu": [
"loong64"
],
@@ -15595,9 +14681,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-mips64el": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz",
- "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.1.tgz",
+ "integrity": "sha512-U0bEuAOLvO/DWFdygTHWY8C067FXz+UbzKgxYhXC0fDieFa0kDIra1FAhsAARRJbvEyso8aAqvPdNxzWuStBnA==",
"cpu": [
"mips64el"
],
@@ -15612,9 +14698,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-ppc64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz",
- "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.1.tgz",
+ "integrity": "sha512-NzdQ/Xwu6vPSf/GkdmRNsOfIeSGnh7muundsWItmBsVpMoNPVpM61qNzAVY3pZ1glzzAxLR40UyYM23eaDDbYQ==",
"cpu": [
"ppc64"
],
@@ -15629,9 +14715,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-riscv64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz",
- "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.1.tgz",
+ "integrity": "sha512-7zlw8p3IApcsN7mFw0O1Z1PyEk6PlKMu18roImfl3iQHTnr/yAfYv6s4hXPidbDoI2Q0pW+5xeoM4eTCC0UdrQ==",
"cpu": [
"riscv64"
],
@@ -15646,9 +14732,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-s390x": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz",
- "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.1.tgz",
+ "integrity": "sha512-cGj5wli+G+nkVQdZo3+7FDKC25Uh4ZVwOAK6A06Hsvgr8WqBBuOy/1s+PUEd/6Je+vjfm6stX0kmib5b/O2Ykw==",
"cpu": [
"s390x"
],
@@ -15663,9 +14749,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/linux-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz",
- "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.1.tgz",
+ "integrity": "sha512-z3H/HYI9MM0HTv3hQZ81f+AKb+yEoCRlUby1F80vbQ5XdzEMyY/9iNlAmhqiBKw4MJXwfgsh7ERGEOhrM1niMA==",
"cpu": [
"x64"
],
@@ -15680,9 +14766,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/netbsd-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz",
- "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.1.tgz",
+ "integrity": "sha512-wzC24DxAvk8Em01YmVXyjl96Mr+ecTPyOuADAvjGg+fyBpGmxmcr2E5ttf7Im8D0sXZihpxzO1isus8MdjMCXQ==",
"cpu": [
"arm64"
],
@@ -15697,9 +14783,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/netbsd-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz",
- "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.1.tgz",
+ "integrity": "sha512-1YQ8ybGi2yIXswu6eNzJsrYIGFpnlzEWRl6iR5gMgmsrR0FcNoV1m9k9sc3PuP5rUBLshOZylc9nqSgymI+TYg==",
"cpu": [
"x64"
],
@@ -15714,9 +14800,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/openbsd-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz",
- "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.1.tgz",
+ "integrity": "sha512-5Z+DzLCrq5wmU7RDaMDe2DVXMRm2tTDvX2KU14JJVBN2CT/qov7XVix85QoJqHltpvAOZUAc3ndU56HSMWrv8g==",
"cpu": [
"arm64"
],
@@ -15731,9 +14817,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/openbsd-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz",
- "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.1.tgz",
+ "integrity": "sha512-Q73ENzIdPF5jap4wqLtsfh8YbYSZ8Q0wnxplOlZUOyZy7B4ZKW8DXGWgTCZmF8VWD7Tciwv5F4NsRf6vYlZtqg==",
"cpu": [
"x64"
],
@@ -15748,9 +14834,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/openharmony-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz",
- "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.1.tgz",
+ "integrity": "sha512-ajbHrGM/XiK+sXM0JzEbJAen+0E+JMQZ2l4RR4VFwvV9JEERx+oxtgkpoKv1SevhjavK2z2ReHk32pjzktWbGg==",
"cpu": [
"arm64"
],
@@ -15765,9 +14851,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/sunos-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz",
- "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.1.tgz",
+ "integrity": "sha512-IPUW+y4VIjuDVn+OMzHc5FV4GubIwPnsz6ubkvN8cuhEqH81NovB53IUlrlBkPMEPxvNnf79MGBoz8rZ2iW8HA==",
"cpu": [
"x64"
],
@@ -15782,9 +14868,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/win32-arm64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz",
- "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.1.tgz",
+ "integrity": "sha512-RIVRWiljWA6CdVu8zkWcRmGP7iRRIIwvhDKem8UMBjPql2TXM5PkDVvvrzMtj1V+WFPB4K7zkIGM7VzRtFkjdg==",
"cpu": [
"arm64"
],
@@ -15799,9 +14885,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/win32-ia32": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz",
- "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.1.tgz",
+ "integrity": "sha512-2BR5M8CPbptC1AK5JbJT1fWrHLvejwZidKx3UMSF0ecHMa+smhi16drIrCEggkgviBwLYd5nwrFLSl5Kho96RQ==",
"cpu": [
"ia32"
],
@@ -15816,9 +14902,9 @@
}
},
"node_modules/vite/node_modules/@esbuild/win32-x64": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz",
- "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.1.tgz",
+ "integrity": "sha512-d5X6RMYv6taIymSk8JBP+nxv8DQAMY6A51GPgusqLdK9wBz5wWIXy1KjTck6HnjE9hqJzJRdk+1p/t5soSbCtw==",
"cpu": [
"x64"
],
@@ -15833,9 +14919,9 @@
}
},
"node_modules/vite/node_modules/esbuild": {
- "version": "0.27.2",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz",
- "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==",
+ "version": "0.27.1",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.1.tgz",
+ "integrity": "sha512-yY35KZckJJuVVPXpvjgxiCuVEJT67F6zDeVTv4rizyPrfGBUpZQsvmxnN+C371c2esD/hNMjj4tpBhuueLN7aA==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
@@ -15846,50 +14932,32 @@
"node": ">=18"
},
"optionalDependencies": {
- "@esbuild/aix-ppc64": "0.27.2",
- "@esbuild/android-arm": "0.27.2",
- "@esbuild/android-arm64": "0.27.2",
- "@esbuild/android-x64": "0.27.2",
- "@esbuild/darwin-arm64": "0.27.2",
- "@esbuild/darwin-x64": "0.27.2",
- "@esbuild/freebsd-arm64": "0.27.2",
- "@esbuild/freebsd-x64": "0.27.2",
- "@esbuild/linux-arm": "0.27.2",
- "@esbuild/linux-arm64": "0.27.2",
- "@esbuild/linux-ia32": "0.27.2",
- "@esbuild/linux-loong64": "0.27.2",
- "@esbuild/linux-mips64el": "0.27.2",
- "@esbuild/linux-ppc64": "0.27.2",
- "@esbuild/linux-riscv64": "0.27.2",
- "@esbuild/linux-s390x": "0.27.2",
- "@esbuild/linux-x64": "0.27.2",
- "@esbuild/netbsd-arm64": "0.27.2",
- "@esbuild/netbsd-x64": "0.27.2",
- "@esbuild/openbsd-arm64": "0.27.2",
- "@esbuild/openbsd-x64": "0.27.2",
- "@esbuild/openharmony-arm64": "0.27.2",
- "@esbuild/sunos-x64": "0.27.2",
- "@esbuild/win32-arm64": "0.27.2",
- "@esbuild/win32-ia32": "0.27.2",
- "@esbuild/win32-x64": "0.27.2"
- }
- },
- "node_modules/vite/node_modules/fdir": {
- "version": "6.5.0",
- "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
- "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12.0.0"
- },
- "peerDependencies": {
- "picomatch": "^3 || ^4"
- },
- "peerDependenciesMeta": {
- "picomatch": {
- "optional": true
- }
+ "@esbuild/aix-ppc64": "0.27.1",
+ "@esbuild/android-arm": "0.27.1",
+ "@esbuild/android-arm64": "0.27.1",
+ "@esbuild/android-x64": "0.27.1",
+ "@esbuild/darwin-arm64": "0.27.1",
+ "@esbuild/darwin-x64": "0.27.1",
+ "@esbuild/freebsd-arm64": "0.27.1",
+ "@esbuild/freebsd-x64": "0.27.1",
+ "@esbuild/linux-arm": "0.27.1",
+ "@esbuild/linux-arm64": "0.27.1",
+ "@esbuild/linux-ia32": "0.27.1",
+ "@esbuild/linux-loong64": "0.27.1",
+ "@esbuild/linux-mips64el": "0.27.1",
+ "@esbuild/linux-ppc64": "0.27.1",
+ "@esbuild/linux-riscv64": "0.27.1",
+ "@esbuild/linux-s390x": "0.27.1",
+ "@esbuild/linux-x64": "0.27.1",
+ "@esbuild/netbsd-arm64": "0.27.1",
+ "@esbuild/netbsd-x64": "0.27.1",
+ "@esbuild/openbsd-arm64": "0.27.1",
+ "@esbuild/openbsd-x64": "0.27.1",
+ "@esbuild/openharmony-arm64": "0.27.1",
+ "@esbuild/sunos-x64": "0.27.1",
+ "@esbuild/win32-arm64": "0.27.1",
+ "@esbuild/win32-ia32": "0.27.1",
+ "@esbuild/win32-x64": "0.27.1"
}
},
"node_modules/vite/node_modules/fsevents": {
@@ -15907,33 +14975,20 @@
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
- "node_modules/vite/node_modules/picomatch": {
- "version": "4.0.3",
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
- "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/jonschlinkert"
- }
- },
"node_modules/vitest": {
- "version": "4.0.16",
- "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz",
- "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.15.tgz",
+ "integrity": "sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/expect": "4.0.16",
- "@vitest/mocker": "4.0.16",
- "@vitest/pretty-format": "4.0.16",
- "@vitest/runner": "4.0.16",
- "@vitest/snapshot": "4.0.16",
- "@vitest/spy": "4.0.16",
- "@vitest/utils": "4.0.16",
+ "@vitest/expect": "4.0.15",
+ "@vitest/mocker": "4.0.15",
+ "@vitest/pretty-format": "4.0.15",
+ "@vitest/runner": "4.0.15",
+ "@vitest/snapshot": "4.0.15",
+ "@vitest/spy": "4.0.15",
+ "@vitest/utils": "4.0.15",
"es-module-lexer": "^1.7.0",
"expect-type": "^1.2.2",
"magic-string": "^0.30.21",
@@ -15961,10 +15016,10 @@
"@edge-runtime/vm": "*",
"@opentelemetry/api": "^1.9.0",
"@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0",
- "@vitest/browser-playwright": "4.0.16",
- "@vitest/browser-preview": "4.0.16",
- "@vitest/browser-webdriverio": "4.0.16",
- "@vitest/ui": "4.0.16",
+ "@vitest/browser-playwright": "4.0.15",
+ "@vitest/browser-preview": "4.0.15",
+ "@vitest/browser-webdriverio": "4.0.15",
+ "@vitest/ui": "4.0.15",
"happy-dom": "*",
"jsdom": "*"
},
@@ -15998,19 +15053,6 @@
}
}
},
- "node_modules/vitest/node_modules/picomatch": {
- "version": "4.0.3",
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
- "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/jonschlinkert"
- }
- },
"node_modules/void-elements": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz",
@@ -16044,13 +15086,26 @@
}
},
"node_modules/webidl-conversions": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz",
- "integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz",
+ "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
"dev": true,
"license": "BSD-2-Clause",
"engines": {
- "node": ">=20"
+ "node": ">=12"
+ }
+ },
+ "node_modules/whatwg-encoding": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz",
+ "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "iconv-lite": "0.6.3"
+ },
+ "engines": {
+ "node": ">=18"
}
},
"node_modules/whatwg-mimetype": {
@@ -16064,17 +15119,17 @@
}
},
"node_modules/whatwg-url": {
- "version": "15.1.0",
- "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz",
- "integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==",
+ "version": "14.2.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz",
+ "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "tr46": "^6.0.0",
- "webidl-conversions": "^8.0.0"
+ "tr46": "^5.1.0",
+ "webidl-conversions": "^7.0.0"
},
"engines": {
- "node": ">=20"
+ "node": ">=18"
}
},
"node_modules/which": {
@@ -16210,18 +15265,18 @@
}
},
"node_modules/wrap-ansi": {
- "version": "9.0.2",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
- "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"license": "MIT",
"dependencies": {
- "ansi-styles": "^6.2.1",
- "string-width": "^7.0.0",
- "strip-ansi": "^7.1.0"
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
},
"engines": {
- "node": ">=18"
+ "node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
@@ -16246,57 +15301,6 @@
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
- "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/wrap-ansi/node_modules/ansi-styles": {
- "version": "6.2.3",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
- "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
- }
- },
- "node_modules/wrap-ansi/node_modules/emoji-regex": {
- "version": "10.6.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
- "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/wrap-ansi/node_modules/string-width": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
- "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "emoji-regex": "^10.3.0",
- "get-east-asian-width": "^1.0.0",
- "strip-ansi": "^7.1.0"
- },
- "engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
@@ -16440,9 +15444,10 @@
}
},
"node_modules/zod": {
- "version": "4.3.4",
- "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.4.tgz",
- "integrity": "sha512-Zw/uYiiyF6pUT1qmKbZziChgNPRu+ZRneAsMUDU6IwmXdWt5JwcUfy2bvLOCUtz5UniaN/Zx5aFttZYbYc7O/A==",
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-4.2.0.tgz",
+ "integrity": "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw==",
+ "dev": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
diff --git a/apps/frontend/package.json b/apps/frontend/package.json
index 1561b64046..3b9e8bda37 100644
--- a/apps/frontend/package.json
+++ b/apps/frontend/package.json
@@ -48,6 +48,7 @@
"typecheck": "tsc --noEmit"
},
"dependencies": {
+ "@anthropic-ai/sdk": "^0.71.2",
"@dnd-kit/core": "^6.3.1",
"@dnd-kit/sortable": "^10.0.0",
"@dnd-kit/utilities": "^3.2.2",
@@ -68,6 +69,7 @@
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-toast": "^1.2.15",
"@radix-ui/react-tooltip": "^1.2.8",
+ "@sentry/electron": "^7.5.0",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/react-virtual": "^3.13.13",
"@xterm/addon-fit": "^0.11.0",
@@ -78,11 +80,14 @@
"chokidar": "^5.0.0",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
+ "dotenv": "^16.6.1",
"electron-log": "^5.4.3",
"electron-updater": "^6.6.2",
"i18next": "^25.7.3",
"lucide-react": "^0.562.0",
+ "minimatch": "^10.1.1",
"motion": "^12.23.26",
+ "proper-lockfile": "^4.1.2",
"react": "^19.2.3",
"react-dom": "^19.2.3",
"react-i18next": "^16.5.0",
@@ -102,7 +107,9 @@
"@eslint/js": "^9.39.1",
"@playwright/test": "^1.52.0",
"@tailwindcss/postcss": "^4.1.17",
+ "@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.1.0",
+ "@types/minimatch": "^5.1.2",
"@types/node": "^25.0.0",
"@types/react": "^19.2.7",
"@types/react-dom": "^19.2.3",
@@ -111,7 +118,7 @@
"@vitejs/plugin-react": "^5.1.2",
"autoprefixer": "^10.4.22",
"cross-env": "^10.1.0",
- "electron": "^39.2.7",
+ "electron": "39.2.7",
"electron-builder": "^26.0.12",
"electron-vite": "^5.0.0",
"eslint": "^9.39.1",
@@ -207,7 +214,7 @@
]
},
"linux": {
- "icon": "resources/icon.png",
+ "icon": "resources/icons",
"target": [
"AppImage",
"deb",
diff --git a/apps/frontend/resources/icons/128x128.png b/apps/frontend/resources/icons/128x128.png
new file mode 100644
index 0000000000..7e694b434c
Binary files /dev/null and b/apps/frontend/resources/icons/128x128.png differ
diff --git a/apps/frontend/resources/icons/16x16.png b/apps/frontend/resources/icons/16x16.png
new file mode 100644
index 0000000000..bc533838b6
Binary files /dev/null and b/apps/frontend/resources/icons/16x16.png differ
diff --git a/apps/frontend/resources/icons/256x256.png b/apps/frontend/resources/icons/256x256.png
new file mode 100644
index 0000000000..555230d363
Binary files /dev/null and b/apps/frontend/resources/icons/256x256.png differ
diff --git a/apps/frontend/resources/icons/32x32.png b/apps/frontend/resources/icons/32x32.png
new file mode 100644
index 0000000000..227e6db694
Binary files /dev/null and b/apps/frontend/resources/icons/32x32.png differ
diff --git a/apps/frontend/resources/icons/48x48.png b/apps/frontend/resources/icons/48x48.png
new file mode 100644
index 0000000000..29e6b3bc03
Binary files /dev/null and b/apps/frontend/resources/icons/48x48.png differ
diff --git a/apps/frontend/resources/icons/512x512.png b/apps/frontend/resources/icons/512x512.png
new file mode 100644
index 0000000000..22d476ffc1
Binary files /dev/null and b/apps/frontend/resources/icons/512x512.png differ
diff --git a/apps/frontend/resources/icons/64x64.png b/apps/frontend/resources/icons/64x64.png
new file mode 100644
index 0000000000..0068c05929
Binary files /dev/null and b/apps/frontend/resources/icons/64x64.png differ
diff --git a/apps/frontend/scripts/download-python.cjs b/apps/frontend/scripts/download-python.cjs
index 215af7db3c..17f9abdf65 100644
--- a/apps/frontend/scripts/download-python.cjs
+++ b/apps/frontend/scripts/download-python.cjs
@@ -609,12 +609,14 @@ function installPackages(pythonBin, requirementsPath, targetSitePackages) {
// Install packages directly to target directory
// --no-compile: Don't create .pyc files (saves space, Python will work without them)
- // --no-cache-dir: Don't use pip cache
// --target: Install to specific directory
+ // --only-binary: Force binary wheels for pydantic (prevents silent source build failures)
+ // Note: We intentionally DO use pip's cache to preserve built wheels for packages
+ // like real_ladybug that must be compiled from source on Intel Mac (no PyPI wheel)
const pipArgs = [
'-m', 'pip', 'install',
'--no-compile',
- '--no-cache-dir',
+ '--only-binary', 'pydantic,pydantic-core',
'--target', targetSitePackages,
'-r', requirementsPath,
];
@@ -702,9 +704,32 @@ async function downloadPython(targetPlatform, targetArch, options = {}) {
try {
const version = verifyPythonBinary(pythonBin);
console.log(`[download-python] Verified: ${version}`);
- return { success: true, pythonPath: pythonBin, sitePackagesPath: sitePackagesDir };
- } catch {
- console.log(`[download-python] Existing installation is broken, re-downloading...`);
+
+ // Verify critical packages exist (fixes GitHub issue #416)
+ // Without this check, corrupted caches with missing packages would be accepted
+ // Note: Same list exists in python-env-manager.ts - keep them in sync
+ // This validation assumes traditional Python packages with __init__.py (not PEP 420 namespace packages)
+ const criticalPackages = ['claude_agent_sdk', 'dotenv', 'pydantic_core'];
+ const missingPackages = criticalPackages.filter(pkg => {
+ const pkgPath = path.join(sitePackagesDir, pkg);
+ // Check both directory and __init__.py for more robust validation
+ const initFile = path.join(pkgPath, '__init__.py');
+ return !fs.existsSync(pkgPath) || !fs.existsSync(initFile);
+ });
+
+ if (missingPackages.length > 0) {
+ console.log(`[download-python] Critical packages missing or incomplete: ${missingPackages.join(', ')}`);
+ console.log(`[download-python] Reinstalling packages...`);
+ // Remove site-packages to force reinstall, keep Python binary
+ // Flow continues below to re-install packages (skipPackages check at line 794)
+ fs.rmSync(sitePackagesDir, { recursive: true, force: true });
+ } else {
+ console.log(`[download-python] All critical packages verified`);
+ return { success: true, pythonPath: pythonBin, sitePackagesPath: sitePackagesDir };
+ }
+ } catch (err) {
+ const errorMsg = err instanceof Error ? err.message : String(err);
+ console.log(`[download-python] Existing installation is broken: ${errorMsg}`);
fs.rmSync(platformDir, { recursive: true, force: true });
}
}
@@ -784,6 +809,22 @@ async function downloadPython(targetPlatform, targetArch, options = {}) {
// Install packages
installPackages(pythonBin, requirementsPath, sitePackagesDir);
+ // Verify critical packages were installed before creating marker (fixes #416)
+ // Note: Same list exists in python-env-manager.ts - keep them in sync
+ // This validation assumes traditional Python packages with __init__.py (not PEP 420 namespace packages)
+ const criticalPackages = ['claude_agent_sdk', 'dotenv', 'pydantic_core'];
+ const postInstallMissing = criticalPackages.filter(pkg => {
+ const pkgPath = path.join(sitePackagesDir, pkg);
+ const initFile = path.join(pkgPath, '__init__.py');
+ return !fs.existsSync(pkgPath) || !fs.existsSync(initFile);
+ });
+
+ if (postInstallMissing.length > 0) {
+ throw new Error(`Package installation failed - missing critical packages: ${postInstallMissing.join(', ')}`);
+ }
+
+ console.log(`[download-python] All critical packages verified after installation`);
+
// Create marker file to indicate successful bundling
fs.writeFileSync(packagesMarker, JSON.stringify({
bundledAt: new Date().toISOString(),
diff --git a/apps/frontend/scripts/postinstall.cjs b/apps/frontend/scripts/postinstall.cjs
index 41a8ebe645..e4c02e6dee 100644
--- a/apps/frontend/scripts/postinstall.cjs
+++ b/apps/frontend/scripts/postinstall.cjs
@@ -42,13 +42,36 @@ To install:
================================================================================
`;
+/**
+ * Get electron version from package.json
+ */
+function getElectronVersion() {
+ const pkgPath = path.join(__dirname, '..', 'package.json');
+ const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
+ const electronVersion = pkg.devDependencies?.electron || pkg.dependencies?.electron;
+ if (!electronVersion) {
+ return null;
+ }
+ // Strip leading ^ or ~ from version
+ return electronVersion.replace(/^[\^~]/, '');
+}
+
/**
* Run electron-rebuild
*/
function runElectronRebuild() {
return new Promise((resolve, reject) => {
const npx = isWindows ? 'npx.cmd' : 'npx';
- const child = spawn(npx, ['electron-rebuild'], {
+ const electronVersion = getElectronVersion();
+ const args = ['electron-rebuild'];
+
+ // Explicitly pass electron version if detected
+ if (electronVersion) {
+ args.push('-v', electronVersion);
+ console.log(`[postinstall] Using Electron version: ${electronVersion}`);
+ }
+
+ const child = spawn(npx, args, {
stdio: 'inherit',
shell: isWindows,
cwd: path.join(__dirname, '..'),
@@ -70,12 +93,40 @@ function runElectronRebuild() {
* Check if node-pty is already built
*/
function isNodePtyBuilt() {
- const buildDir = path.join(__dirname, '..', 'node_modules', 'node-pty', 'build', 'Release');
- if (!fs.existsSync(buildDir)) return false;
+ // Check traditional node-pty build location (local node_modules)
+ const localBuildDir = path.join(__dirname, '..', 'node_modules', 'node-pty', 'build', 'Release');
+ if (fs.existsSync(localBuildDir)) {
+ const files = fs.readdirSync(localBuildDir);
+ if (files.some((f) => f.endsWith('.node'))) return true;
+ }
+
+ // Check root node_modules (for npm workspaces)
+ const rootBuildDir = path.join(__dirname, '..', '..', '..', 'node_modules', 'node-pty', 'build', 'Release');
+ if (fs.existsSync(rootBuildDir)) {
+ const files = fs.readdirSync(rootBuildDir);
+ if (files.some((f) => f.endsWith('.node'))) return true;
+ }
+
+ // Check for @lydell/node-pty with platform-specific prebuilts
+ const arch = os.arch();
+ const platform = os.platform();
+ const platformPkg = `@lydell/node-pty-${platform}-${arch}`;
+
+ // Check local node_modules
+ const localLydellDir = path.join(__dirname, '..', 'node_modules', platformPkg);
+ if (fs.existsSync(localLydellDir)) {
+ const files = fs.readdirSync(localLydellDir);
+ if (files.some((f) => f.endsWith('.node'))) return true;
+ }
+
+ // Check root node_modules (for npm workspaces)
+ const rootLydellDir = path.join(__dirname, '..', '..', '..', 'node_modules', platformPkg);
+ if (fs.existsSync(rootLydellDir)) {
+ const files = fs.readdirSync(rootLydellDir);
+ if (files.some((f) => f.endsWith('.node'))) return true;
+ }
- // Check for the main .node file
- const files = fs.readdirSync(buildDir);
- return files.some((f) => f.endsWith('.node'));
+ return false;
}
/**
diff --git a/apps/frontend/src/__mocks__/electron.ts b/apps/frontend/src/__mocks__/electron.ts
index 39f45801de..e5569f6893 100644
--- a/apps/frontend/src/__mocks__/electron.ts
+++ b/apps/frontend/src/__mocks__/electron.ts
@@ -56,7 +56,8 @@ export const ipcRenderer = {
on: vi.fn(),
once: vi.fn(),
removeListener: vi.fn(),
- removeAllListeners: vi.fn()
+ removeAllListeners: vi.fn(),
+ setMaxListeners: vi.fn()
};
// Mock BrowserWindow
@@ -125,6 +126,13 @@ export const nativeTheme = {
on: vi.fn()
};
+// Mock screen
+export const screen = {
+ getPrimaryDisplay: vi.fn(() => ({
+ workAreaSize: { width: 1920, height: 1080 }
+ }))
+};
+
export default {
app,
ipcMain,
@@ -133,5 +141,6 @@ export default {
dialog,
contextBridge,
shell,
- nativeTheme
+ nativeTheme,
+ screen
};
diff --git a/apps/frontend/src/__mocks__/sentry-electron-main.ts b/apps/frontend/src/__mocks__/sentry-electron-main.ts
new file mode 100644
index 0000000000..697d392257
--- /dev/null
+++ b/apps/frontend/src/__mocks__/sentry-electron-main.ts
@@ -0,0 +1 @@
+export * from './sentry-electron-shared';
diff --git a/apps/frontend/src/__mocks__/sentry-electron-renderer.ts b/apps/frontend/src/__mocks__/sentry-electron-renderer.ts
new file mode 100644
index 0000000000..697d392257
--- /dev/null
+++ b/apps/frontend/src/__mocks__/sentry-electron-renderer.ts
@@ -0,0 +1 @@
+export * from './sentry-electron-shared';
diff --git a/apps/frontend/src/__mocks__/sentry-electron-shared.ts b/apps/frontend/src/__mocks__/sentry-electron-shared.ts
new file mode 100644
index 0000000000..e2c97e98fe
--- /dev/null
+++ b/apps/frontend/src/__mocks__/sentry-electron-shared.ts
@@ -0,0 +1,26 @@
+export type SentryErrorEvent = Record<string, unknown>;
+
+export type SentryScope = {
+ setContext: (key: string, value: Record<string, unknown>) => void;
+};
+
+export type SentryInitOptions = {
+ beforeSend?: (event: SentryErrorEvent) => SentryErrorEvent | null;
+ tracesSampleRate?: number;
+ profilesSampleRate?: number;
+ dsn?: string;
+ environment?: string;
+ release?: string;
+ debug?: boolean;
+ enabled?: boolean;
+};
+
+export function init(_options: SentryInitOptions): void {}
+
+export function captureException(_error: Error): void {}
+
+export function withScope(callback: (scope: SentryScope) => void): void {
+ callback({
+ setContext: () => {}
+ });
+}
diff --git a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts
index 641f8e968b..432c5f361d 100644
--- a/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts
+++ b/apps/frontend/src/__tests__/integration/ipc-bridge.test.ts
@@ -11,7 +11,8 @@ const mockIpcRenderer = {
on: vi.fn(),
once: vi.fn(),
removeListener: vi.fn(),
- removeAllListeners: vi.fn()
+ removeAllListeners: vi.fn(),
+ setMaxListeners: vi.fn()
};
// Mock contextBridge
diff --git a/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts b/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts
index 1ef0da9ded..f3ca37d495 100644
--- a/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts
+++ b/apps/frontend/src/__tests__/integration/subprocess-spawn.test.ts
@@ -30,9 +30,13 @@ const mockProcess = Object.assign(new EventEmitter(), {
})
});
-vi.mock('child_process', () => ({
- spawn: vi.fn(() => mockProcess)
-}));
+vi.mock('child_process', async (importOriginal) => {
+ const actual = await importOriginal<typeof import('child_process')>();
+ return {
+ ...actual,
+ spawn: vi.fn(() => mockProcess)
+ };
+});
// Mock claude-profile-manager to bypass auth checks in tests
vi.mock('../../main/claude-profile-manager', () => ({
@@ -107,7 +111,7 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test task description');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test task description');
expect(spawn).toHaveBeenCalledWith(
EXPECTED_PYTHON_COMMAND,
@@ -132,7 +136,7 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001');
+ await manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001');
expect(spawn).toHaveBeenCalledWith(
EXPECTED_PYTHON_COMMAND,
@@ -154,7 +158,7 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startQAProcess('task-1', TEST_PROJECT_PATH, 'spec-001');
+ await manager.startQAProcess('task-1', TEST_PROJECT_PATH, 'spec-001');
expect(spawn).toHaveBeenCalledWith(
EXPECTED_PYTHON_COMMAND,
@@ -178,7 +182,7 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001', {
+ await manager.startTaskExecution('task-1', TEST_PROJECT_PATH, 'spec-001', {
parallel: true,
workers: 4
});
@@ -204,7 +208,7 @@ describe('Subprocess Spawn Integration', () => {
const logHandler = vi.fn();
manager.on('log', logHandler);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
// Simulate stdout data (must include newline for buffered output processing)
mockStdout.emit('data', Buffer.from('Test log output\n'));
@@ -220,7 +224,7 @@ describe('Subprocess Spawn Integration', () => {
const logHandler = vi.fn();
manager.on('log', logHandler);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
// Simulate stderr data (must include newline for buffered output processing)
mockStderr.emit('data', Buffer.from('Progress: 50%\n'));
@@ -236,7 +240,7 @@ describe('Subprocess Spawn Integration', () => {
const exitHandler = vi.fn();
manager.on('exit', exitHandler);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
// Simulate process exit
mockProcess.emit('exit', 0);
@@ -253,7 +257,7 @@ describe('Subprocess Spawn Integration', () => {
const errorHandler = vi.fn();
manager.on('error', errorHandler);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
// Simulate process error
mockProcess.emit('error', new Error('Spawn failed'));
@@ -266,7 +270,7 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
expect(manager.isRunning('task-1')).toBe(true);
@@ -293,12 +297,12 @@ describe('Subprocess Spawn Integration', () => {
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
expect(manager.getRunningTasks()).toHaveLength(0);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1');
expect(manager.getRunningTasks()).toContain('task-1');
- manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001');
+ await manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001');
expect(manager.getRunningTasks()).toHaveLength(2);
- });
+ }, 15000);
it('should use configured Python path', async () => {
const { spawn } = await import('child_process');
@@ -307,7 +311,7 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure('/custom/python3', AUTO_CLAUDE_SOURCE);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test');
expect(spawn).toHaveBeenCalledWith(
'/custom/python3',
@@ -321,8 +325,8 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1');
- manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1');
+ await manager.startTaskExecution('task-2', TEST_PROJECT_PATH, 'spec-001');
await manager.killAll();
@@ -334,10 +338,10 @@ describe('Subprocess Spawn Integration', () => {
const manager = new AgentManager();
manager.configure(undefined, AUTO_CLAUDE_SOURCE);
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 1');
// Start another process for same task
- manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 2');
+ await manager.startSpecCreation('task-1', TEST_PROJECT_PATH, 'Test 2');
// Should have killed the first one
expect(mockProcess.kill).toHaveBeenCalled();
diff --git a/apps/frontend/src/__tests__/integration/task-lifecycle.test.ts b/apps/frontend/src/__tests__/integration/task-lifecycle.test.ts
new file mode 100644
index 0000000000..cf6641d0ec
--- /dev/null
+++ b/apps/frontend/src/__tests__/integration/task-lifecycle.test.ts
@@ -0,0 +1,382 @@
+/**
+ * Integration tests for task lifecycle
+ * Tests spec completion to subtask loading workflow (IPC communication)
+ */
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { mkdirSync, mkdtempSync, writeFileSync, rmSync, existsSync } from 'fs';
+import { tmpdir } from 'os';
+import path from 'path';
+
+// Test directories - created securely with mkdtempSync to prevent TOCTOU attacks
+let TEST_DIR: string;
+let TEST_PROJECT_PATH: string;
+let TEST_SPEC_DIR: string;
+
+// Mock ipcRenderer for renderer-side tests
+const mockIpcRenderer = {
+ invoke: vi.fn(),
+ send: vi.fn(),
+ on: vi.fn(),
+ once: vi.fn(),
+ removeListener: vi.fn(),
+ removeAllListeners: vi.fn(),
+ setMaxListeners: vi.fn()
+};
+
+// Mock contextBridge
+const exposedApis: Record<string, unknown> = {};
+const mockContextBridge = {
+ exposeInMainWorld: vi.fn((name: string, api: unknown) => {
+ exposedApis[name] = api;
+ })
+};
+
+vi.mock('electron', () => ({
+ ipcRenderer: mockIpcRenderer,
+ contextBridge: mockContextBridge
+}));
+
+// Sample implementation plan with subtasks
+function createTestPlan(overrides: Record<string, unknown> = {}): object {
+ return {
+ feature: 'Test Feature',
+ workflow_type: 'feature',
+ services_involved: ['frontend'],
+ phases: [
+ {
+ id: 'phase-1',
+ name: 'Implementation Phase',
+ type: 'implementation',
+ subtasks: [
+ {
+ id: 'subtask-1-1',
+ description: 'Implement feature A',
+ status: 'pending',
+ files_to_modify: ['file1.ts'],
+ files_to_create: [],
+ service: 'frontend'
+ },
+ {
+ id: 'subtask-1-2',
+ description: 'Add unit tests for feature A',
+ status: 'pending',
+ files_to_modify: [],
+ files_to_create: ['file1.test.ts'],
+ service: 'frontend'
+ }
+ ]
+ }
+ ],
+ status: 'in_progress',
+ planStatus: 'in_progress',
+ created_at: new Date().toISOString(),
+ updated_at: new Date().toISOString(),
+ ...overrides
+ };
+}
+
+// Sample implementation plan with empty phases (incomplete state)
+function createIncompletePlan(): object {
+ return {
+ feature: 'Test Feature',
+ workflow_type: 'feature',
+ services_involved: ['frontend'],
+ phases: [],
+ status: 'planning',
+ planStatus: 'planning',
+ created_at: new Date().toISOString(),
+ updated_at: new Date().toISOString()
+ };
+}
+
+// Setup test directories with secure temp directory
+function setupTestDirs(): void {
+ // Create secure temp directory with random suffix
+ TEST_DIR = mkdtempSync(path.join(tmpdir(), 'task-lifecycle-test-'));
+ TEST_PROJECT_PATH = path.join(TEST_DIR, 'test-project');
+ TEST_SPEC_DIR = path.join(TEST_PROJECT_PATH, '.auto-claude/specs/001-test-feature');
+ mkdirSync(TEST_SPEC_DIR, { recursive: true });
+}
+
+// Cleanup test directories
+function cleanupTestDirs(): void {
+ if (TEST_DIR && existsSync(TEST_DIR)) {
+ rmSync(TEST_DIR, { recursive: true, force: true });
+ }
+}
+
+describe('Task Lifecycle Integration', () => {
+ beforeEach(async () => {
+ cleanupTestDirs();
+ setupTestDirs();
+ vi.clearAllMocks();
+ vi.resetModules();
+ Object.keys(exposedApis).forEach((key) => delete exposedApis[key]);
+ });
+
+ afterEach(() => {
+ cleanupTestDirs();
+ vi.clearAllMocks();
+ });
+
+ describe('Spec completion to subtask loading', () => {
+ it('should load subtasks from implementation_plan.json after spec completion', async () => {
+ // Create implementation_plan.json with full subtask data
+ const planPath = path.join(TEST_SPEC_DIR, 'implementation_plan.json');
+ const plan = createTestPlan();
+ writeFileSync(planPath, JSON.stringify(plan, null, 2));
+
+ // Import preload script to get electronAPI
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ // Mock IPC response for getTasks (loads implementation_plan.json)
+ mockIpcRenderer.invoke.mockResolvedValueOnce({
+ success: true,
+ data: [
+ {
+ id: 'task-001',
+ name: 'Test Feature',
+ status: 'spec_complete',
+ specDir: TEST_SPEC_DIR,
+ plan: plan
+ }
+ ]
+ });
+
+ // Call getTasks to load plan data
+ const getTasks = electronAPI['getTasks'] as (projectId: string) => Promise<unknown>;
+ const result = await getTasks('project-id');
+
+ // Verify IPC invocation
+ expect(mockIpcRenderer.invoke).toHaveBeenCalledWith('task:list', 'project-id');
+
+ // Verify task data includes plan with subtasks
+ expect(result).toMatchObject({
+ success: true,
+ data: expect.arrayContaining([
+ expect.objectContaining({
+ plan: expect.objectContaining({
+ phases: expect.arrayContaining([
+ expect.objectContaining({
+ subtasks: expect.arrayContaining([
+ expect.objectContaining({
+ id: 'subtask-1-1',
+ description: 'Implement feature A',
+ status: 'pending'
+ }),
+ expect.objectContaining({
+ id: 'subtask-1-2',
+ description: 'Add unit tests for feature A',
+ status: 'pending'
+ })
+ ])
+ })
+ ])
+ })
+ })
+ ])
+ });
+ });
+
+ it('should handle incomplete plan data with empty phases array', async () => {
+ // Create implementation_plan.json with incomplete data (empty phases)
+ const planPath = path.join(TEST_SPEC_DIR, 'implementation_plan.json');
+ const incompletePlan = createIncompletePlan();
+ writeFileSync(planPath, JSON.stringify(incompletePlan, null, 2));
+
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ // Mock IPC response for getTasks
+ mockIpcRenderer.invoke.mockResolvedValueOnce({
+ success: true,
+ data: [
+ {
+ id: 'task-001',
+ name: 'Test Feature',
+ status: 'planning',
+ specDir: TEST_SPEC_DIR,
+ plan: incompletePlan
+ }
+ ]
+ });
+
+ const getTasks = electronAPI['getTasks'] as (projectId: string) => Promise<unknown>;
+ const result = await getTasks('project-id');
+
+ // Verify task data reflects incomplete state
+ expect(result).toMatchObject({
+ success: true,
+ data: expect.arrayContaining([
+ expect.objectContaining({
+ plan: expect.objectContaining({
+ phases: [],
+ status: 'planning'
+ })
+ })
+ ])
+ });
+ });
+
+ it('should emit task:statusChange event when task transitions from planning to spec_complete', async () => {
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ // Setup event listener
+ const callback = vi.fn();
+ const onTaskStatusChange = electronAPI['onTaskStatusChange'] as (cb: Function) => Function;
+ onTaskStatusChange(callback);
+
+ // Verify listener was registered
+ expect(mockIpcRenderer.on).toHaveBeenCalledWith(
+ 'task:statusChange',
+ expect.any(Function)
+ );
+
+ // Simulate status change event from main process
+ // The event handler signature is: (_event, taskId, status)
+ const eventHandler = mockIpcRenderer.on.mock.calls.find(
+ (call) => call[0] === 'task:statusChange'
+ )?.[1];
+
+ if (eventHandler) {
+ eventHandler({}, 'task-001', 'spec_complete');
+ }
+
+ // Verify callback was invoked with correct parameters (taskId, status, projectId)
+ // Note: projectId is optional and undefined when not provided
+ expect(callback).toHaveBeenCalledWith('task-001', 'spec_complete', undefined);
+ });
+
+ it('should emit task:progress event with updated plan during spec creation', async () => {
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ // Setup event listener
+ const callback = vi.fn();
+ const onTaskProgress = electronAPI['onTaskProgress'] as (cb: Function) => Function;
+ onTaskProgress(callback);
+
+ // Verify listener was registered
+ expect(mockIpcRenderer.on).toHaveBeenCalledWith(
+ 'task:progress',
+ expect.any(Function)
+ );
+
+ // Simulate progress event with plan update
+ // The event handler signature is: (_event, taskId, plan)
+ const eventHandler = mockIpcRenderer.on.mock.calls.find(
+ (call) => call[0] === 'task:progress'
+ )?.[1];
+
+ const plan = createTestPlan();
+ if (eventHandler) {
+ eventHandler({}, 'task-001', plan);
+ }
+
+ // Verify callback was invoked with correct parameters (taskId, plan, projectId)
+ // Note: projectId is optional and undefined when not provided
+ expect(callback).toHaveBeenCalledWith(
+ 'task-001',
+ expect.objectContaining({
+ phases: expect.arrayContaining([
+ expect.objectContaining({
+ subtasks: expect.any(Array)
+ })
+ ])
+ }),
+ undefined
+ );
+ });
+
+ it('should handle task resume by reloading implementation plan', async () => {
+ // Create implementation_plan.json
+ const planPath = path.join(TEST_SPEC_DIR, 'implementation_plan.json');
+ const plan = createTestPlan();
+ writeFileSync(planPath, JSON.stringify(plan, null, 2));
+
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ // Mock IPC response for task start (resume)
+ mockIpcRenderer.invoke.mockResolvedValueOnce({
+ success: true,
+ message: 'Task resumed'
+ });
+
+ // Call startTask (resume)
+ const startTask = electronAPI['startTask'] as (id: string, options?: object) => void;
+ startTask('task-001', { resume: true });
+
+ // Verify IPC send was called
+ expect(mockIpcRenderer.send).toHaveBeenCalledWith(
+ 'task:start',
+ 'task-001',
+ { resume: true }
+ );
+ });
+
+ it('should handle task update status IPC call', async () => {
+ await import('../../preload/index');
+ // Note: electronAPI is exposed but we test the IPC channel directly below
+
+ // Check if updateTaskStatus method exists (might be part of updateTask)
+ // Based on IPC_CHANNELS, we have TASK_UPDATE_STATUS
+ mockIpcRenderer.invoke.mockResolvedValueOnce({
+ success: true
+ });
+
+ // Since updateTaskStatus might not be directly exposed, we test the IPC channel directly
+ const result = await mockIpcRenderer.invoke('task:updateStatus', 'task-001', 'in_progress');
+
+ expect(mockIpcRenderer.invoke).toHaveBeenCalledWith(
+ 'task:updateStatus',
+ 'task-001',
+ 'in_progress'
+ );
+ expect(result).toMatchObject({ success: true });
+ });
+ });
+
+ describe('Event listener cleanup', () => {
+ it('should cleanup task:progress listener when cleanup function is called', async () => {
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ const callback = vi.fn();
+ const onTaskProgress = electronAPI['onTaskProgress'] as (cb: Function) => Function;
+ const cleanup = onTaskProgress(callback);
+
+ expect(typeof cleanup).toBe('function');
+
+ // Call cleanup
+ cleanup();
+
+ expect(mockIpcRenderer.removeListener).toHaveBeenCalledWith(
+ 'task:progress',
+ expect.any(Function)
+ );
+ });
+
+ it('should cleanup task:statusChange listener when cleanup function is called', async () => {
+ await import('../../preload/index');
+ const electronAPI = exposedApis['electronAPI'] as Record<string, unknown>;
+
+ const callback = vi.fn();
+ const onTaskStatusChange = electronAPI['onTaskStatusChange'] as (cb: Function) => Function;
+ const cleanup = onTaskStatusChange(callback);
+
+ expect(typeof cleanup).toBe('function');
+
+ // Call cleanup
+ cleanup();
+
+ expect(mockIpcRenderer.removeListener).toHaveBeenCalledWith(
+ 'task:statusChange',
+ expect.any(Function)
+ );
+ });
+ });
+
+});
\ No newline at end of file
diff --git a/apps/frontend/src/__tests__/integration/terminal-copy-paste.test.ts b/apps/frontend/src/__tests__/integration/terminal-copy-paste.test.ts
new file mode 100644
index 0000000000..ea4cec57d3
--- /dev/null
+++ b/apps/frontend/src/__tests__/integration/terminal-copy-paste.test.ts
@@ -0,0 +1,728 @@
+/**
+ * @vitest-environment jsdom
+ */
+
+/**
+ * Integration tests for terminal copy/paste functionality
+ * Tests xterm.js selection API integration with clipboard operations
+ */
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { render, act } from '@testing-library/react';
+import React from 'react';
+import type { Mock } from 'vitest';
+import { Terminal as XTerm } from '@xterm/xterm';
+import { FitAddon } from '@xterm/addon-fit';
+import { WebLinksAddon } from '@xterm/addon-web-links';
+import { SerializeAddon } from '@xterm/addon-serialize';
+
+// Mock xterm.js and its addons
+vi.mock('@xterm/xterm', () => ({
+ Terminal: vi.fn().mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(),
+ hasSelection: vi.fn(function() { return false; }),
+ getSelection: vi.fn(function() { return ''; }),
+ paste: vi.fn(),
+ input: vi.fn(),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ })
+}));
+
+vi.mock('@xterm/addon-fit', () => ({
+ FitAddon: vi.fn().mockImplementation(function() {
+ return {
+ fit: vi.fn()
+ };
+ })
+}));
+
+vi.mock('@xterm/addon-web-links', () => ({
+ WebLinksAddon: vi.fn().mockImplementation(function() {
+ return {};
+ })
+}));
+
+vi.mock('@xterm/addon-serialize', () => ({
+ SerializeAddon: vi.fn().mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ })
+}));
+
+describe('Terminal copy/paste integration', () => {
+ let mockClipboard: {
+ writeText: Mock;
+ readText: Mock;
+ };
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+
+ // Mock ResizeObserver
+ global.ResizeObserver = vi.fn().mockImplementation(function() {
+ return {
+ observe: vi.fn(),
+ unobserve: vi.fn(),
+ disconnect: vi.fn()
+ };
+ });
+
+ // Mock navigator.clipboard
+ mockClipboard = {
+ writeText: vi.fn().mockResolvedValue(undefined),
+ readText: vi.fn().mockResolvedValue('clipboard content')
+ };
+
+ Object.defineProperty(global.navigator, 'clipboard', {
+ value: mockClipboard,
+ writable: true
+ });
+
+ // Mock window.electronAPI
+ (window as unknown as { electronAPI: unknown }).electronAPI = {
+ sendTerminalInput: vi.fn()
+ };
+ });
+
+ afterEach(() => {
+ vi.restoreAllMocks();
+ });
+
+ describe('xterm.js selection API integration with clipboard write', () => {
+ it('should integrate xterm.hasSelection() with clipboard write', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ const mockHasSelection = vi.fn(function() { return true; });
+ const mockGetSelection = vi.fn(function() { return 'selected terminal text'; });
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: mockHasSelection,
+ getSelection: mockGetSelection,
+ paste: vi.fn(),
+ input: vi.fn(),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ await act(async () => {
+ // Simulate copy operation
+ const event = new KeyboardEvent('keydown', {
+ key: 'c',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(event);
+ // Wait for clipboard write
+ await new Promise(resolve => setTimeout(resolve, 0));
+ }
+ });
+
+ // Verify integration: hasSelection() called
+ expect(mockHasSelection).toHaveBeenCalled();
+
+ // Verify integration: getSelection() called when hasSelection returns true
+ expect(mockGetSelection).toHaveBeenCalled();
+
+ // Verify integration: clipboard.writeText() called with selection
+ expect(mockClipboard.writeText).toHaveBeenCalledWith('selected terminal text');
+ });
+
+ it('should not call getSelection when hasSelection returns false', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ const mockHasSelection = vi.fn(function() { return false; });
+ const mockGetSelection = vi.fn(function() { return ''; });
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: mockHasSelection,
+ getSelection: mockGetSelection,
+ paste: vi.fn(),
+ input: vi.fn(),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ await act(async () => {
+ const event = new KeyboardEvent('keydown', {
+ key: 'c',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(event);
+ }
+ });
+
+ // Verify hasSelection was called
+ expect(mockHasSelection).toHaveBeenCalled();
+
+ // Verify getSelection was NOT called (no selection)
+ expect(mockGetSelection).not.toHaveBeenCalled();
+
+ // Verify clipboard was NOT written to
+ expect(mockClipboard.writeText).not.toHaveBeenCalled();
+ });
+ });
+
+ describe('clipboard read with xterm paste integration', () => {
+ let originalNavigatorPlatform: string;
+
+ beforeEach(() => {
+ // Capture original navigator.platform
+ originalNavigatorPlatform = navigator.platform;
+ });
+
+ afterEach(() => {
+ // Restore navigator.platform
+ Object.defineProperty(navigator, 'platform', {
+ value: originalNavigatorPlatform,
+ writable: true
+ });
+ });
+
+ it('should integrate clipboard.readText() with xterm.paste()', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ // Mock Windows platform
+ Object.defineProperty(navigator, 'platform', {
+ value: 'Win32',
+ writable: true
+ });
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ const mockPaste = vi.fn();
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: vi.fn(),
+ getSelection: vi.fn(),
+ paste: mockPaste,
+ input: vi.fn(),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ mockClipboard.readText.mockResolvedValue('pasted text');
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ await act(async () => {
+ const event = new KeyboardEvent('keydown', {
+ key: 'v',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(event);
+ // Wait for clipboard read and paste
+ await new Promise(resolve => setTimeout(resolve, 0));
+ }
+ });
+
+ // Verify integration: clipboard.readText() called
+ expect(mockClipboard.readText).toHaveBeenCalled();
+
+ // Verify integration: xterm.paste() called with clipboard content
+ expect(mockPaste).toHaveBeenCalledWith('pasted text');
+ });
+
+ it('should not paste when clipboard is empty', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ // Mock Linux platform
+ Object.defineProperty(navigator, 'platform', {
+ value: 'Linux',
+ writable: true
+ });
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ const mockPaste = vi.fn();
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: vi.fn(),
+ getSelection: vi.fn(),
+ paste: mockPaste,
+ input: vi.fn(),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ // Mock empty clipboard
+ mockClipboard.readText.mockResolvedValue('');
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ await act(async () => {
+ const event = new KeyboardEvent('keydown', {
+ key: 'v',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(event);
+ // Wait for clipboard read
+ await new Promise(resolve => setTimeout(resolve, 0));
+ }
+ });
+
+ // Verify clipboard was read
+ expect(mockClipboard.readText).toHaveBeenCalled();
+
+ // Verify paste was NOT called for empty clipboard
+ expect(mockPaste).not.toHaveBeenCalled();
+ });
+ });
+
+ describe('keyboard event propagation', () => {
+ it('should prevent copy/paste events from interfering with other shortcuts', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ let eventCallOrder: string[] = [];
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: vi.fn(function() { return true; }),
+ getSelection: vi.fn(function() { return 'selection'; }),
+ paste: vi.fn(),
+ input: vi.fn(function(data: string) {
+ eventCallOrder.push(`input:${data}`);
+ }),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ await act(async () => {
+ // Test SHIFT+Enter (should work independently of copy/paste)
+ const shiftEnterEvent = new KeyboardEvent('keydown', {
+ key: 'Enter',
+ shiftKey: true,
+ ctrlKey: false,
+ metaKey: false
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(shiftEnterEvent);
+ }
+
+ // Verify SHIFT+Enter still works (sends newline)
+ expect(eventCallOrder.some(s => s.includes('\x1b\n'))).toBe(true);
+
+ // Test CTRL+C with selection (should not interfere)
+ eventCallOrder = [];
+ const copyEvent = new KeyboardEvent('keydown', {
+ key: 'c',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(copyEvent);
+ // Wait for clipboard write
+ await new Promise(resolve => setTimeout(resolve, 0));
+ }
+
+ // Copy should not send input to terminal
+ expect(eventCallOrder).toHaveLength(0);
+
+ // Test CTRL+V (should not interfere)
+ const pasteEvent = new KeyboardEvent('keydown', {
+ key: 'v',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(pasteEvent);
+ // Wait for clipboard read
+ await new Promise(resolve => setTimeout(resolve, 0));
+ }
+
+ // Paste should use xterm.paste(), not xterm.input()
+ // The input() should not be called directly
+ expect(eventCallOrder).toHaveLength(0);
+ });
+ });
+
+ it('should maintain correct handler ordering for existing shortcuts', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ let handlerResults: { key: string; handled: boolean }[] = [];
+ const mockHasSelection = vi.fn(function() { return false; });
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: mockHasSelection,
+ getSelection: vi.fn(),
+ paste: vi.fn(),
+ input: vi.fn(),
+ onData: vi.fn(),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ // Helper to test key handling
+ const testKey = (key: string, ctrl: boolean, meta: boolean, shift: boolean) => {
+ const event = new KeyboardEvent('keydown', {
+ key,
+ ctrlKey: ctrl,
+ metaKey: meta,
+ shiftKey: shift
+ });
+
+ if (keyEventHandler) {
+ const handled = keyEventHandler(event);
+ handlerResults.push({ key, handled });
+ }
+ };
+
+ await act(async () => {
+ // Test existing shortcuts (should return false to bubble up)
+ testKey('1', true, false, false); // Ctrl+1
+ testKey('Tab', true, false, false); // Ctrl+Tab
+ testKey('t', true, false, false); // Ctrl+T
+ testKey('w', true, false, false); // Ctrl+W
+
+ // Verify these return false (bubble to window handler)
+ expect(handlerResults.filter(r => !r.handled)).toHaveLength(4);
+
+ // Test copy/paste WITHOUT selection (should pass through to send ^C)
+ handlerResults = [];
+ mockHasSelection.mockReturnValue(false);
+ testKey('c', true, false, false); // Ctrl+C without selection
+
+ // Should return true (let ^C pass through to terminal for interrupt signal)
+ expect(handlerResults[0].handled).toBe(true);
+ });
+ });
+ });
+
+ describe('clipboard error handling without breaking terminal', () => {
+ it('should continue terminal operation after clipboard error', async () => {
+ const { useXterm } = await import('../../renderer/components/terminal/useXterm');
+
+ // Mock Windows platform to enable custom paste handler
+ Object.defineProperty(navigator, 'platform', {
+ value: 'Win32',
+ writable: true
+ });
+
+ let keyEventHandler: ((event: KeyboardEvent) => boolean) | null = null;
+ const mockPaste = vi.fn();
+ const mockInput = vi.fn();
+ const mockSendTerminalInput = vi.fn();
+ let onDataCallback: ((data: string) => void) | undefined;
+ let errorLogged = false;
+
+ const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(function(...args: unknown[]) {
+ if (String(args[0]).includes('[useXterm]')) {
+ errorLogged = true;
+ }
+ });
+
+ // Mock clipboard error
+ mockClipboard.readText = vi.fn().mockRejectedValue(new Error('Clipboard denied'));
+
+ // Mock window.electronAPI with sendTerminalInput
+ (window as unknown as { electronAPI: { sendTerminalInput: Mock } }).electronAPI = {
+ sendTerminalInput: mockSendTerminalInput
+ };
+
+ // Override XTerm mock to be constructable
+ (XTerm as unknown as Mock).mockImplementation(function() {
+ return {
+ open: vi.fn(),
+ loadAddon: vi.fn(),
+ attachCustomKeyEventHandler: vi.fn(function(handler: (event: KeyboardEvent) => boolean) {
+ keyEventHandler = handler;
+ }),
+ hasSelection: vi.fn(),
+ getSelection: vi.fn(),
+ paste: mockPaste,
+ input: mockInput,
+ onData: vi.fn(function(callback: (data: string) => void) {
+ onDataCallback = callback;
+ }),
+ onResize: vi.fn(),
+ dispose: vi.fn(),
+ write: vi.fn(),
+ cols: 80,
+ rows: 24
+ };
+ });
+
+ // Need to also override the addon mocks to be constructable
+ (FitAddon as unknown as Mock).mockImplementation(function() {
+ return { fit: vi.fn() };
+ });
+
+ (WebLinksAddon as unknown as Mock).mockImplementation(function() {
+ return {};
+ });
+
+ (SerializeAddon as unknown as Mock).mockImplementation(function() {
+ return {
+ serialize: vi.fn(function() { return ''; }),
+ dispose: vi.fn()
+ };
+ });
+
+ // Create a test wrapper component that provides the DOM element
+ const TestWrapper = () => {
+ const { terminalRef } = useXterm({ terminalId: 'test-terminal' });
+ return React.createElement('div', { ref: terminalRef });
+ };
+
+ render(React.createElement(TestWrapper));
+
+ await act(async () => {
+ // Try to paste (will fail)
+ const pasteEvent = new KeyboardEvent('keydown', {
+ key: 'v',
+ ctrlKey: true
+ });
+
+ if (keyEventHandler) {
+ keyEventHandler(pasteEvent);
+ // Wait for clipboard error
+ await new Promise(resolve => setTimeout(resolve, 0));
+ }
+ });
+
+ // Verify error was logged
+ expect(errorLogged).toBe(true);
+
+ // Verify terminal still works (can accept input through onData callback)
+ const inputData = 'test command';
+
+ if (onDataCallback) {
+ onDataCallback(inputData);
+ }
+
+ // Verify input was sent to electronAPI (terminal still functional)
+ expect(mockSendTerminalInput).toHaveBeenCalledWith('test-terminal', 'test command');
+
+ consoleErrorSpy.mockRestore();
+ });
+ });
+});
diff --git a/apps/frontend/src/__tests__/setup.ts b/apps/frontend/src/__tests__/setup.ts
index 34f7a6465f..dc2c99dd91 100644
--- a/apps/frontend/src/__tests__/setup.ts
+++ b/apps/frontend/src/__tests__/setup.ts
@@ -28,6 +28,14 @@ Object.defineProperty(global, 'localStorage', {
value: localStorageMock
});
+// Mock scrollIntoView for Radix Select in jsdom
+if (typeof HTMLElement !== 'undefined' && !HTMLElement.prototype.scrollIntoView) {
+ Object.defineProperty(HTMLElement.prototype, 'scrollIntoView', {
+ value: vi.fn(),
+ writable: true
+ });
+}
+
// Test data directory for isolated file operations
export const TEST_DATA_DIR = '/tmp/auto-claude-ui-tests';
@@ -88,7 +96,14 @@ if (typeof window !== 'undefined') {
success: true,
data: { openProjectIds: [], activeProjectId: null, tabOrder: [] }
}),
- saveTabState: vi.fn().mockResolvedValue({ success: true })
+ saveTabState: vi.fn().mockResolvedValue({ success: true }),
+ // Profile-related API methods (API Profile feature)
+ getAPIProfiles: vi.fn(),
+ saveAPIProfile: vi.fn(),
+ updateAPIProfile: vi.fn(),
+ deleteAPIProfile: vi.fn(),
+ setActiveAPIProfile: vi.fn(),
+ testConnection: vi.fn()
};
}
diff --git a/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts b/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts
new file mode 100644
index 0000000000..42bd919b3b
--- /dev/null
+++ b/apps/frontend/src/main/__tests__/claude-cli-utils.test.ts
@@ -0,0 +1,126 @@
+import path from 'path';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+
+const mockGetToolPath = vi.fn<() => string>();
+const mockGetAugmentedEnv = vi.fn<() => Record<string, string>>();
+
+vi.mock('../cli-tool-manager', () => ({
+ getToolPath: mockGetToolPath,
+}));
+
+vi.mock('../env-utils', () => ({
+ getAugmentedEnv: mockGetAugmentedEnv,
+}));
+
+describe('claude-cli-utils', () => {
+ beforeEach(() => {
+ mockGetToolPath.mockReset();
+ mockGetAugmentedEnv.mockReset();
+ vi.resetModules();
+ });
+
+ it('prepends the CLI directory to PATH when the command is absolute', async () => {
+ const command = process.platform === 'win32'
+ ? 'C:\\Tools\\claude\\claude.exe'
+ : '/opt/claude/bin/claude';
+ const env = {
+ PATH: process.platform === 'win32'
+ ? 'C:\\Windows\\System32'
+ : '/usr/bin',
+ HOME: '/tmp',
+ };
+ mockGetToolPath.mockReturnValue(command);
+ mockGetAugmentedEnv.mockReturnValue(env);
+
+ const { getClaudeCliInvocation } = await import('../claude-cli-utils');
+ const result = getClaudeCliInvocation();
+
+ const separator = process.platform === 'win32' ? ';' : ':';
+ expect(result.command).toBe(command);
+ expect(result.env.PATH.split(separator)[0]).toBe(path.dirname(command));
+ expect(result.env.HOME).toBe(env.HOME);
+ });
+
+ it('sets PATH to the command directory when PATH is empty', async () => {
+ const command = process.platform === 'win32'
+ ? 'C:\\Tools\\claude\\claude.exe'
+ : '/opt/claude/bin/claude';
+ const env = { PATH: '' };
+ mockGetToolPath.mockReturnValue(command);
+ mockGetAugmentedEnv.mockReturnValue(env);
+
+ const { getClaudeCliInvocation } = await import('../claude-cli-utils');
+ const result = getClaudeCliInvocation();
+
+ expect(result.env.PATH).toBe(path.dirname(command));
+ });
+
+ it('sets PATH to the command directory when PATH is missing', async () => {
+ const command = process.platform === 'win32'
+ ? 'C:\\Tools\\claude\\claude.exe'
+ : '/opt/claude/bin/claude';
+ const env = {};
+ mockGetToolPath.mockReturnValue(command);
+ mockGetAugmentedEnv.mockReturnValue(env);
+
+ const { getClaudeCliInvocation } = await import('../claude-cli-utils');
+ const result = getClaudeCliInvocation();
+
+ expect(result.env.PATH).toBe(path.dirname(command));
+ });
+
+ it('keeps PATH unchanged when the command is not absolute', async () => {
+ const env = {
+ PATH: process.platform === 'win32'
+ ? 'C:\\Windows;C:\\Windows\\System32'
+ : '/usr/bin:/bin',
+ };
+ mockGetToolPath.mockReturnValue('claude');
+ mockGetAugmentedEnv.mockReturnValue(env);
+
+ const { getClaudeCliInvocation } = await import('../claude-cli-utils');
+ const result = getClaudeCliInvocation();
+
+ expect(result.command).toBe('claude');
+ expect(result.env.PATH).toBe(env.PATH);
+ });
+
+ it('does not duplicate the command directory in PATH', async () => {
+ const command = process.platform === 'win32'
+ ? 'C:\\Tools\\claude\\claude.exe'
+ : '/opt/claude/bin/claude';
+ const commandDir = path.dirname(command);
+ const separator = process.platform === 'win32' ? ';' : ':';
+ const env = { PATH: `${commandDir}${separator}/usr/bin` };
+
+ mockGetToolPath.mockReturnValue(command);
+ mockGetAugmentedEnv.mockReturnValue(env);
+
+ const { getClaudeCliInvocation } = await import('../claude-cli-utils');
+ const result = getClaudeCliInvocation();
+
+ expect(result.env.PATH).toBe(env.PATH);
+ });
+
+ it('treats PATH entries case-insensitively on Windows', async () => {
+ const originalPlatform = Object.getOwnPropertyDescriptor(process, 'platform');
+ Object.defineProperty(process, 'platform', { value: 'win32' });
+
+ try {
+ const command = 'C:\\Tools\\claude\\claude.exe';
+ const env = { PATH: 'c:\\tools\\claude;C:\\Windows' };
+
+ mockGetToolPath.mockReturnValue(command);
+ mockGetAugmentedEnv.mockReturnValue(env);
+
+ const { getClaudeCliInvocation } = await import('../claude-cli-utils');
+ const result = getClaudeCliInvocation();
+
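+ // The lowercase PATH entry should be recognized as the command directory, so nothing is prepended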
+ expect(result.env.PATH).toBe(env.PATH);
+ } finally {
+ if (originalPlatform) {
+ Object.defineProperty(process, 'platform', originalPlatform);
+ }
+ }
+ });
+});
diff --git a/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts b/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts
new file mode 100644
index 0000000000..b39c588a6d
--- /dev/null
+++ b/apps/frontend/src/main/__tests__/cli-tool-manager.test.ts
@@ -0,0 +1,469 @@
+/**
+ * Unit tests for cli-tool-manager
+ * Tests CLI tool detection with focus on NVM path detection
+ */
+
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { existsSync, readdirSync } from 'fs';
+import os from 'os';
+import { execFileSync } from 'child_process';
+import { app } from 'electron';
+import {
+ getToolInfo,
+ clearToolCache,
+ getClaudeDetectionPaths,
+ sortNvmVersionDirs,
+ buildClaudeDetectionResult
+} from '../cli-tool-manager';
+
+// Mock Electron app
+vi.mock('electron', () => ({
+ app: {
+ isPackaged: false,
+ getPath: vi.fn()
+ }
+}));
+
+// Mock os module
+vi.mock('os', () => ({
+ default: {
+ homedir: vi.fn(() => '/mock/home')
+ }
+}));
+
+// Mock fs module - need to mock both sync and promises
+vi.mock('fs', () => {
+ const mockDirent = (
+ name: string,
+ isDir: boolean
+ ): { name: string; isDirectory: () => boolean } => ({
+ name,
+ isDirectory: () => isDir
+ });
+
+ return {
+ existsSync: vi.fn(),
+ readdirSync: vi.fn(),
+ promises: {}
+ };
+});
+
+// Mock child_process for execFileSync and execFile (used in validation)
+vi.mock('child_process', () => ({
+ execFileSync: vi.fn(),
+ execFile: vi.fn()
+}));
+
+// Mock env-utils to avoid PATH augmentation complexity
+vi.mock('../env-utils', () => ({
+ findExecutable: vi.fn(() => null), // Return null to force platform-specific path checking
+ getAugmentedEnv: vi.fn(() => ({ PATH: '' }))
+}));
+
+// Mock homebrew-python utility
+vi.mock('../utils/homebrew-python', () => ({
+ findHomebrewPython: vi.fn(() => null)
+}));
+
+describe('cli-tool-manager - Claude CLI NVM detection', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ // Set default platform to Linux
+ Object.defineProperty(process, 'platform', {
+ value: 'linux',
+ writable: true
+ });
+ });
+
+ afterEach(() => {
+ clearToolCache();
+ });
+
+ const mockHomeDir = '/mock/home';
+
+ describe('NVM path detection on Unix/Linux/macOS', () => {
+ it('should detect Claude CLI in NVM directory when multiple Node versions exist', () => {
+ // Mock home directory
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ // Mock NVM directory exists
+ vi.mocked(existsSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ // NVM versions directory exists
+ if (pathStr.includes('.nvm/versions/node')) {
+ return true;
+ }
+ // Claude CLI exists in v22.17.0
+ if (pathStr.includes('v22.17.0/bin/claude')) {
+ return true;
+ }
+ return false;
+ });
+
+ // Mock readdirSync to return Node version directories
+ vi.mocked(readdirSync).mockImplementation((filePath, options) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return [
+ { name: 'v20.11.0', isDirectory: () => true },
+ { name: 'v22.17.0', isDirectory: () => true }
+ ] as any;
+ }
+ return [] as any;
+ });
+
+ // Mock execFileSync to return version for validation
+ vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n');
+
+ const result = getToolInfo('claude');
+
+ expect(result.found).toBe(true);
+ expect(result.path).toContain('v22.17.0');
+ expect(result.path).toContain('bin/claude');
+ expect(result.source).toBe('nvm');
+ });
+
+ it('should try multiple NVM Node versions until finding Claude CLI', () => {
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ vi.mocked(existsSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return true;
+ }
+ // Only v24.12.0 has Claude CLI
+ if (pathStr.includes('v24.12.0/bin/claude')) {
+ return true;
+ }
+ return false;
+ });
+
+ vi.mocked(readdirSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return [
+ { name: 'v18.20.0', isDirectory: () => true },
+ { name: 'v20.11.0', isDirectory: () => true },
+ { name: 'v24.12.0', isDirectory: () => true }
+ ] as any;
+ }
+ return [] as any;
+ });
+
+ vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n');
+
+ const result = getToolInfo('claude');
+
+ expect(result.found).toBe(true);
+ expect(result.path).toContain('v24.12.0');
+ expect(result.source).toBe('nvm');
+ });
+
+ it('should skip non-version directories in NVM (names that do not start with "v")', () => {
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ vi.mocked(existsSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return true;
+ }
+ // Only the correctly named version has Claude
+ if (pathStr.includes('v22.17.0/bin/claude')) {
+ return true;
+ }
+ return false;
+ });
+
+ vi.mocked(readdirSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return [
+ { name: 'current', isDirectory: () => true }, // Should be skipped
+ { name: 'system', isDirectory: () => true }, // Should be skipped
+ { name: 'v22.17.0', isDirectory: () => true } // Should be checked
+ ] as any;
+ }
+ return [] as any;
+ });
+
+ vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n');
+
+ const result = getToolInfo('claude');
+
+ expect(result.found).toBe(true);
+ expect(result.path).toContain('v22.17.0');
+ });
+
+ it('should not check NVM paths on Windows', () => {
+ Object.defineProperty(process, 'platform', {
+ value: 'win32',
+ writable: true
+ });
+
+ vi.mocked(os.homedir).mockReturnValue('C:\\Users\\test');
+
+ // Even if NVM directory exists on Windows, should not check it
+ vi.mocked(existsSync).mockReturnValue(false);
+ vi.mocked(readdirSync).mockReturnValue([]);
+
+ const result = getToolInfo('claude');
+
+ // Should not be found from NVM on Windows
+ expect(result.source).not.toBe('nvm');
+ });
+
+ it('should handle missing NVM directory gracefully', () => {
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ // NVM directory does not exist
+ vi.mocked(existsSync).mockReturnValue(false);
+
+ const result = getToolInfo('claude');
+
+ // Should not find via NVM
+ expect(result.source).not.toBe('nvm');
+ expect(result.found).toBe(false);
+ });
+
+ it('should handle readdirSync errors gracefully', () => {
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ vi.mocked(existsSync).mockReturnValue(true);
+ vi.mocked(readdirSync).mockImplementation(() => {
+ throw new Error('Permission denied');
+ });
+
+ const result = getToolInfo('claude');
+
+ // Should not crash, should fall back to other detection methods
+ expect(result.source).not.toBe('nvm');
+ });
+
+ it('should validate Claude CLI before returning NVM path', () => {
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ vi.mocked(existsSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return true;
+ }
+ if (pathStr.includes('v22.17.0/bin/claude')) {
+ return true;
+ }
+ return false;
+ });
+
+ vi.mocked(readdirSync).mockImplementation(() => {
+ return [{ name: 'v22.17.0', isDirectory: () => true }] as any;
+ });
+
+ // Mock validation failure (execFileSync throws)
+ vi.mocked(execFileSync).mockImplementation(() => {
+ throw new Error('Command failed');
+ });
+
+ const result = getToolInfo('claude');
+
+ // Should not return unvalidated path
+ expect(result.found).toBe(false);
+ expect(result.source).not.toBe('nvm');
+ });
+
+ it('should handle NVM directory with no version subdirectories', () => {
+ vi.mocked(os.homedir).mockReturnValue(mockHomeDir);
+
+ vi.mocked(existsSync).mockImplementation((filePath) => {
+ return String(filePath).includes('.nvm/versions/node');
+ });
+
+ // Empty NVM directory
+ vi.mocked(readdirSync).mockReturnValue([]);
+
+ const result = getToolInfo('claude');
+
+ expect(result.source).not.toBe('nvm');
+ });
+ });
+
+ describe('NVM on macOS', () => {
+ it('should detect Claude CLI via NVM on macOS', () => {
+ Object.defineProperty(process, 'platform', {
+ value: 'darwin',
+ writable: true
+ });
+
+ vi.mocked(os.homedir).mockReturnValue('/Users/test');
+
+ vi.mocked(existsSync).mockImplementation((filePath) => {
+ const pathStr = String(filePath);
+ if (pathStr.includes('.nvm/versions/node')) {
+ return true;
+ }
+ if (pathStr.includes('v22.17.0/bin/claude')) {
+ return true;
+ }
+ return false;
+ });
+
+ vi.mocked(readdirSync).mockImplementation(() => {
+ return [{ name: 'v22.17.0', isDirectory: () => true }] as any;
+ });
+
+ vi.mocked(execFileSync).mockReturnValue('claude-code version 1.0.0\n');
+
+ const result = getToolInfo('claude');
+
+ expect(result.found).toBe(true);
+ expect(result.source).toBe('nvm');
+ expect(result.path).toContain('v22.17.0');
+ });
+ });
+});
+
+/**
+ * Unit tests for helper functions
+ */
+describe('cli-tool-manager - Helper Functions', () => {
+ describe('getClaudeDetectionPaths', () => {
+ it('should return homebrew paths on macOS', () => {
+ Object.defineProperty(process, 'platform', {
+ value: 'darwin',
+ writable: true
+ });
+
+ const paths = getClaudeDetectionPaths('/Users/test');
+
+ expect(paths.homebrewPaths).toContain('/opt/homebrew/bin/claude');
+ expect(paths.homebrewPaths).toContain('/usr/local/bin/claude');
+ });
+
+ it('should return Windows paths on win32', () => {
+ Object.defineProperty(process, 'platform', {
+ value: 'win32',
+ writable: true
+ });
+
+ const paths = getClaudeDetectionPaths('C:\\Users\\test');
+
+ // Windows paths should include AppData and Program Files
+ expect(paths.platformPaths.some(p => p.includes('AppData'))).toBe(true);
+ expect(paths.platformPaths.some(p => p.includes('Program Files'))).toBe(true);
+ });
+
+ it('should return Unix paths on Linux', () => {
+ Object.defineProperty(process, 'platform', {
+ value: 'linux',
+ writable: true
+ });
+
+ const paths = getClaudeDetectionPaths('/home/test');
+
+ expect(paths.platformPaths.some(p => p.includes('.local/bin/claude'))).toBe(true);
+ expect(paths.platformPaths.some(p => p.includes('bin/claude'))).toBe(true);
+ });
+
+ it('should return correct NVM versions directory', () => {
+ const paths = getClaudeDetectionPaths('/home/test');
+
+ expect(paths.nvmVersionsDir).toBe('/home/test/.nvm/versions/node');
+ });
+ });
+
+ describe('sortNvmVersionDirs', () => {
+ it('should sort versions in descending order (newest first)', () => {
+ const entries = [
+ { name: 'v18.20.0', isDirectory: () => true },
+ { name: 'v22.17.0', isDirectory: () => true },
+ { name: 'v20.11.0', isDirectory: () => true }
+ ];
+
+ const sorted = sortNvmVersionDirs(entries);
+
+ expect(sorted).toEqual(['v22.17.0', 'v20.11.0', 'v18.20.0']);
+ });
+
+ it('should filter out non-version directories', () => {
+ const entries = [
+ { name: 'v20.11.0', isDirectory: () => true },
+ { name: '.DS_Store', isDirectory: () => false },
+ { name: 'node_modules', isDirectory: () => true },
+ { name: 'current', isDirectory: () => true },
+ { name: 'v22.17.0', isDirectory: () => true }
+ ];
+
+ const sorted = sortNvmVersionDirs(entries);
+
+ expect(sorted).toEqual(['v22.17.0', 'v20.11.0']);
+ expect(sorted).not.toContain('.DS_Store');
+ expect(sorted).not.toContain('node_modules');
+ expect(sorted).not.toContain('current');
+ });
+
+ it('should return empty array when no valid versions', () => {
+ const entries = [
+ { name: 'current', isDirectory: () => true },
+ { name: 'system', isDirectory: () => true }
+ ];
+
+ const sorted = sortNvmVersionDirs(entries);
+
+ expect(sorted).toEqual([]);
+ });
+
+ it('should handle single entry', () => {
+ const entries = [{ name: 'v20.11.0', isDirectory: () => true }];
+
+ const sorted = sortNvmVersionDirs(entries);
+
+ expect(sorted).toEqual(['v20.11.0']);
+ });
+
+ it('should handle empty array', () => {
+ const sorted = sortNvmVersionDirs([]);
+
+ expect(sorted).toEqual([]);
+ });
+ });
+
+ describe('buildClaudeDetectionResult', () => {
+ it('should return null when validation fails', () => {
+ const result = buildClaudeDetectionResult(
+ '/path/to/claude',
+ { valid: false, message: 'Invalid CLI' },
+ 'nvm',
+ 'Found via NVM'
+ );
+
+ expect(result).toBeNull();
+ });
+
+ it('should return proper result when validation succeeds', () => {
+ const result = buildClaudeDetectionResult(
+ '/path/to/claude',
+ { valid: true, version: '1.0.0', message: 'Valid' },
+ 'nvm',
+ 'Found via NVM'
+ );
+
+ expect(result).not.toBeNull();
+ expect(result?.found).toBe(true);
+ expect(result?.path).toBe('/path/to/claude');
+ expect(result?.version).toBe('1.0.0');
+ expect(result?.source).toBe('nvm');
+ expect(result?.message).toContain('Found via NVM');
+ expect(result?.message).toContain('/path/to/claude');
+ });
+
+ it('should include path in message', () => {
+ const result = buildClaudeDetectionResult(
+ '/home/user/.nvm/versions/node/v22.17.0/bin/claude',
+ { valid: true, version: '2.0.0', message: 'OK' },
+ 'nvm',
+ 'Detected Claude CLI'
+ );
+
+ expect(result?.message).toContain('Detected Claude CLI');
+ expect(result?.message).toContain('/home/user/.nvm/versions/node/v22.17.0/bin/claude');
+ });
+ });
+});
diff --git a/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts b/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts
new file mode 100644
index 0000000000..bbcbdc354a
--- /dev/null
+++ b/apps/frontend/src/main/__tests__/env-handlers-claude-cli.test.ts
@@ -0,0 +1,232 @@
+import { EventEmitter } from 'events';
+import path from 'path';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { IPC_CHANNELS } from '../../shared/constants';
+const {
+ mockGetClaudeCliInvocation,
+ mockGetClaudeCliInvocationAsync,
+ mockGetProject,
+ spawnMock,
+ mockIpcMain,
+} = vi.hoisted(() => {
+ const ipcMain = new (class {
+ handlers = new Map();
+
+ handle(channel: string, handler: Function): void {
+ this.handlers.set(channel, handler);
+ }
+
+ getHandler(channel: string): Function | undefined {
+ return this.handlers.get(channel);
+ }
+ })();
+
+ return {
+ mockGetClaudeCliInvocation: vi.fn(),
+ mockGetClaudeCliInvocationAsync: vi.fn(),
+ mockGetProject: vi.fn(),
+ spawnMock: vi.fn(),
+ mockIpcMain: ipcMain,
+ };
+});
+
+vi.mock('../claude-cli-utils', () => ({
+ getClaudeCliInvocation: mockGetClaudeCliInvocation,
+ getClaudeCliInvocationAsync: mockGetClaudeCliInvocationAsync,
+}));
+
+vi.mock('../project-store', () => ({
+ projectStore: {
+ getProject: mockGetProject,
+ },
+}));
+
+vi.mock('child_process', () => ({
+ spawn: spawnMock,
+}));
+
+vi.mock('electron', () => ({
+ app: {
+ getPath: vi.fn((name: string) => {
+ if (name === 'userData') return path.join('/tmp', 'userData');
+ return '/tmp';
+ }),
+ },
+ ipcMain: mockIpcMain,
+}));
+
+import { registerEnvHandlers } from '../ipc-handlers/env-handlers';
+
+function createProc(): EventEmitter & { stdout?: EventEmitter; stderr?: EventEmitter } {
+ const proc = new EventEmitter() as EventEmitter & {
+ stdout?: EventEmitter;
+ stderr?: EventEmitter;
+ };
+ proc.stdout = new EventEmitter();
+ proc.stderr = new EventEmitter();
+ return proc;
+}
+
+// Helper to flush all pending promises (needed for async mock resolution)
+function flushPromises(): Promise<void> {
+ return new Promise(resolve => setTimeout(resolve, 0));
+}
+
+describe('env-handlers Claude CLI usage', () => {
+ beforeEach(() => {
+ mockGetClaudeCliInvocation.mockReset();
+ mockGetClaudeCliInvocationAsync.mockReset();
+ mockGetProject.mockReset();
+ spawnMock.mockReset();
+ });
+
+ it('uses resolved Claude CLI path/env for auth checks', async () => {
+ const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' };
+ const command = '/opt/claude/bin/claude';
+ mockGetClaudeCliInvocationAsync.mockResolvedValue({
+ command,
+ env: claudeEnv,
+ });
+ mockGetProject.mockReturnValue({ id: 'p1', path: '/tmp/project' });
+
+ const procs: ReturnType<typeof createProc>[] = [];
+ spawnMock.mockImplementation(() => {
+ const proc = createProc();
+ procs.push(proc);
+ return proc;
+ });
+
+ registerEnvHandlers(() => null);
+ const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH);
+ if (!handler) {
+ throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered');
+ }
+
+ const resultPromise = handler({}, 'p1');
+ // Wait for async CLI resolution before checking spawn
+ await flushPromises();
+ expect(spawnMock).toHaveBeenCalledTimes(1);
+ expect(spawnMock).toHaveBeenCalledWith(
+ command,
+ ['--version'],
+ expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false })
+ );
+
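+ // A successful --version probe (exit 0) should trigger the follow-up 'api --help' auth check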
+ procs[0].emit('close', 0);
+ await Promise.resolve();
+
+ expect(spawnMock).toHaveBeenCalledTimes(2);
+ expect(spawnMock).toHaveBeenCalledWith(
+ command,
+ ['api', '--help'],
+ expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false })
+ );
+
+ procs[1].emit('close', 0);
+
+ const result = await resultPromise;
+ expect(result).toEqual({ success: true, data: { success: true, authenticated: true } });
+ });
+
+ it('uses resolved Claude CLI path/env for setup-token', async () => {
+ const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' };
+ const command = '/opt/claude/bin/claude';
+ mockGetClaudeCliInvocationAsync.mockResolvedValue({
+ command,
+ env: claudeEnv,
+ });
+ mockGetProject.mockReturnValue({ id: 'p2', path: '/tmp/project' });
+
+ const proc = createProc();
+ spawnMock.mockReturnValue(proc);
+
+ registerEnvHandlers(() => null);
+ const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_INVOKE_CLAUDE_SETUP);
+ if (!handler) {
+ throw new Error('ENV_INVOKE_CLAUDE_SETUP handler not registered');
+ }
+
+ const resultPromise = handler({}, 'p2');
+ // Wait for async CLI resolution before checking spawn
+ await flushPromises();
+ expect(spawnMock).toHaveBeenCalledWith(
+ command,
+ ['setup-token'],
+ expect.objectContaining({
+ cwd: '/tmp/project',
+ env: claudeEnv,
+ shell: false,
+ stdio: 'inherit'
+ })
+ );
+
+ proc.emit('close', 0);
+ const result = await resultPromise;
+ expect(result).toEqual({ success: true, data: { success: true, authenticated: true } });
+ });
+
+ it('returns an error when Claude CLI resolution throws', async () => {
+ mockGetClaudeCliInvocationAsync.mockRejectedValue(new Error('Claude CLI exploded'));
+ mockGetProject.mockReturnValue({ id: 'p3', path: '/tmp/project' });
+
+ registerEnvHandlers(() => null);
+ const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH);
+ if (!handler) {
+ throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered');
+ }
+
+ const result = await handler({}, 'p3');
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('Claude CLI exploded');
+ expect(spawnMock).not.toHaveBeenCalled();
+ });
+
+ it('returns an error when Claude CLI command is missing', async () => {
+ mockGetClaudeCliInvocationAsync.mockResolvedValue({ command: '', env: {} });
+ mockGetProject.mockReturnValue({ id: 'p4', path: '/tmp/project' });
+
+ registerEnvHandlers(() => null);
+ const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH);
+ if (!handler) {
+ throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered');
+ }
+
+ const result = await handler({}, 'p4');
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('Claude CLI path not resolved');
+ expect(spawnMock).not.toHaveBeenCalled();
+ });
+
+ it('returns an error when Claude CLI exits with a non-zero code', async () => {
+ const claudeEnv = { PATH: '/opt/claude/bin:/usr/bin' };
+ const command = '/opt/claude/bin/claude';
+ mockGetClaudeCliInvocationAsync.mockResolvedValue({
+ command,
+ env: claudeEnv,
+ });
+ mockGetProject.mockReturnValue({ id: 'p5', path: '/tmp/project' });
+
+ const proc = createProc();
+ spawnMock.mockReturnValue(proc);
+
+ registerEnvHandlers(() => null);
+ const handler = mockIpcMain.getHandler(IPC_CHANNELS.ENV_CHECK_CLAUDE_AUTH);
+ if (!handler) {
+ throw new Error('ENV_CHECK_CLAUDE_AUTH handler not registered');
+ }
+
+ const resultPromise = handler({}, 'p5');
+ // Wait for async CLI resolution before checking spawn
+ await flushPromises();
+ expect(spawnMock).toHaveBeenCalledWith(
+ command,
+ ['--version'],
+ expect.objectContaining({ cwd: '/tmp/project', env: claudeEnv, shell: false })
+ );
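+ // Simulate the --version probe failing with a non-zero exit code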
+ proc.emit('close', 1);
+
+ const result = await resultPromise;
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('Claude CLI not found');
+ });
+});
diff --git a/apps/frontend/src/main/__tests__/insights-config.test.ts b/apps/frontend/src/main/__tests__/insights-config.test.ts
new file mode 100644
index 0000000000..5775d65ab0
--- /dev/null
+++ b/apps/frontend/src/main/__tests__/insights-config.test.ts
@@ -0,0 +1,99 @@
+/**
+ * @vitest-environment node
+ */
+import path from 'path';
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { InsightsConfig } from '../insights/config';
+
+vi.mock('electron', () => ({
+ app: {
+ getAppPath: () => '/app',
+ getPath: () => '/tmp',
+ isPackaged: false
+ }
+}));
+
+vi.mock('../rate-limit-detector', () => ({
+ getProfileEnv: () => ({ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token' })
+}));
+
+const mockGetApiProfileEnv = vi.fn();
+vi.mock('../services/profile', () => ({
+ getAPIProfileEnv: (...args: unknown[]) => mockGetApiProfileEnv(...args)
+}));
+
+const mockGetPythonEnv = vi.fn();
+vi.mock('../python-env-manager', () => ({
+ pythonEnvManager: {
+ getPythonEnv: () => mockGetPythonEnv()
+ }
+}));
+
+describe('InsightsConfig', () => {
+ const originalEnv = { ...process.env };
+
+ beforeEach(() => {
+ process.env = { ...originalEnv, TEST_ENV: 'ok' };
+ mockGetApiProfileEnv.mockResolvedValue({
+ ANTHROPIC_BASE_URL: 'https://api.z.ai',
+ ANTHROPIC_AUTH_TOKEN: 'key'
+ });
+ mockGetPythonEnv.mockReturnValue({ PYTHONPATH: '/site-packages' });
+ });
+
+ afterEach(() => {
+ process.env = { ...originalEnv };
+ vi.clearAllMocks();
+ vi.restoreAllMocks();
+ });
+
+ it('should build process env with python and profile settings', async () => {
+ const config = new InsightsConfig();
+ vi.spyOn(config, 'loadAutoBuildEnv').mockReturnValue({ CUSTOM_ENV: '1' });
+ vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue('/backend');
+
+ const env = await config.getProcessEnv();
+
+ expect(env.TEST_ENV).toBe('ok');
+ expect(env.CUSTOM_ENV).toBe('1');
+ expect(env.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token');
+ expect(env.ANTHROPIC_BASE_URL).toBe('https://api.z.ai');
+ expect(env.ANTHROPIC_AUTH_TOKEN).toBe('key');
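+ // PYTHONPATH should chain the bundled site-packages and the backend source dir with the platform delimiter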
+ expect(env.PYTHONPATH).toBe(['/site-packages', '/backend'].join(path.delimiter));
+ });
+
+ it('should clear ANTHROPIC env vars in OAuth mode when no API profile is set', async () => {
+ const config = new InsightsConfig();
+ mockGetApiProfileEnv.mockResolvedValue({});
+ process.env = {
+ ...originalEnv,
+ ANTHROPIC_AUTH_TOKEN: 'stale-token',
+ ANTHROPIC_BASE_URL: 'https://stale.example'
+ };
+
+ const env = await config.getProcessEnv();
+
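+ // Stale values must come back as empty strings so the spawned process does not inherit them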
+ expect(env.ANTHROPIC_AUTH_TOKEN).toBe('');
+ expect(env.ANTHROPIC_BASE_URL).toBe('');
+ });
+
+ it('should set PYTHONPATH only to auto-build path when python env has none', async () => {
+ const config = new InsightsConfig();
+ mockGetPythonEnv.mockReturnValue({});
+ vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue('/backend');
+
+ const env = await config.getProcessEnv();
+
+ expect(env.PYTHONPATH).toBe('/backend');
+ });
+
+ it('should keep PYTHONPATH from python env when auto-build path is missing', async () => {
+ const config = new InsightsConfig();
+ mockGetPythonEnv.mockReturnValue({ PYTHONPATH: '/site-packages' });
+ vi.spyOn(config, 'getAutoBuildSourcePath').mockReturnValue(null);
+
+ const env = await config.getProcessEnv();
+
+ expect(env.PYTHONPATH).toBe('/site-packages');
+ });
+});
diff --git a/apps/frontend/src/main/__tests__/ipc-handlers.test.ts b/apps/frontend/src/main/__tests__/ipc-handlers.test.ts
index 86699e5c7c..c969ca335a 100644
--- a/apps/frontend/src/main/__tests__/ipc-handlers.test.ts
+++ b/apps/frontend/src/main/__tests__/ipc-handlers.test.ts
@@ -139,7 +139,8 @@ function cleanupTestDirs(): void {
}
}
-describe('IPC Handlers', () => {
+// Increase timeout for all tests in this file due to dynamic imports and setup overhead
+describe('IPC Handlers', { timeout: 15000 }, () => {
let ipcMain: EventEmitter & {
handlers: Map<string, Function>;
invokeHandler: (channel: string, event: unknown, ...args: unknown[]) => Promise<unknown>;
@@ -519,7 +520,8 @@ describe('IPC Handlers', () => {
expect(mockMainWindow.webContents.send).toHaveBeenCalledWith(
'task:log',
'task-1',
- 'Test log message'
+ 'Test log message',
+ undefined // projectId is undefined when task not found
);
});
@@ -532,7 +534,8 @@ describe('IPC Handlers', () => {
expect(mockMainWindow.webContents.send).toHaveBeenCalledWith(
'task:error',
'task-1',
- 'Test error message'
+ 'Test error message',
+ undefined // projectId is undefined when task not found
);
});
@@ -556,7 +559,8 @@ describe('IPC Handlers', () => {
expect(mockMainWindow.webContents.send).toHaveBeenCalledWith(
'task:statusChange',
'task-1',
- 'human_review'
+ 'human_review',
+ expect.any(String) // projectId for multi-project filtering
);
});
});
diff --git a/apps/frontend/src/main/agent/agent-manager.ts b/apps/frontend/src/main/agent/agent-manager.ts
index a0d65d1fae..962259e3e5 100644
--- a/apps/frontend/src/main/agent/agent-manager.ts
+++ b/apps/frontend/src/main/agent/agent-manager.ts
@@ -87,14 +87,14 @@ export class AgentManager extends EventEmitter {
/**
* Start spec creation process
*/
- startSpecCreation(
+ async startSpecCreation(
taskId: string,
projectPath: string,
taskDescription: string,
specDir?: string,
metadata?: SpecCreationMetadata,
baseBranch?: string
- ): void {
+ ): Promise<void> {
// Pre-flight auth check: Verify active profile has valid authentication
const profileManager = getClaudeProfileManager();
if (!profileManager.hasValidAuth()) {
@@ -152,22 +152,27 @@ export class AgentManager extends EventEmitter {
}
}
+ // Workspace mode: --direct skips worktree isolation (default is isolated for safety)
+ if (metadata?.useWorktree === false) {
+ args.push('--direct');
+ }
+
// Store context for potential restart
this.storeTaskContext(taskId, projectPath, '', {}, true, taskDescription, specDir, metadata, baseBranch);
// Note: This is spec-creation but it chains to task-execution via run.py
- this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution');
+ await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution');
}
/**
* Start task execution (run.py)
*/
- startTaskExecution(
+ async startTaskExecution(
taskId: string,
projectPath: string,
specId: string,
options: TaskExecutionOptions = {}
- ): void {
+ ): Promise<void> {
// Pre-flight auth check: Verify active profile has valid authentication
const profileManager = getClaudeProfileManager();
if (!profileManager.hasValidAuth()) {
@@ -200,6 +205,11 @@ export class AgentManager extends EventEmitter {
// Force: When user starts a task from the UI, that IS their approval
args.push('--force');
+ // Workspace mode: --direct skips worktree isolation (default is isolated for safety)
+ if (options.useWorktree === false) {
+ args.push('--direct');
+ }
+
// Pass base branch if specified (ensures worktrees are created from the correct branch)
if (options.baseBranch) {
args.push('--base-branch', options.baseBranch);
@@ -213,17 +223,17 @@ export class AgentManager extends EventEmitter {
// Store context for potential restart
this.storeTaskContext(taskId, projectPath, specId, options, false);
- this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution');
+ await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'task-execution');
}
/**
* Start QA process
*/
- startQAProcess(
+ async startQAProcess(
taskId: string,
projectPath: string,
specId: string
- ): void {
+ ): Promise<void> {
const autoBuildSource = this.processManager.getAutoBuildSourcePath();
if (!autoBuildSource) {
@@ -243,7 +253,7 @@ export class AgentManager extends EventEmitter {
const args = [runPath, '--spec', specId, '--project-dir', projectPath, '--qa'];
- this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'qa-process');
+ await this.processManager.spawnProcess(taskId, autoBuildSource, args, combinedEnv, 'qa-process');
}
/**
diff --git a/apps/frontend/src/main/agent/agent-process.test.ts b/apps/frontend/src/main/agent/agent-process.test.ts
new file mode 100644
index 0000000000..c06b8f6824
--- /dev/null
+++ b/apps/frontend/src/main/agent/agent-process.test.ts
@@ -0,0 +1,494 @@
+/**
+ * Integration tests for AgentProcessManager
+ * Tests API profile environment variable injection into spawnProcess
+ *
+ * Story 2.3: Env Var Injection - AC1, AC2, AC3, AC4
+ */
+
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { EventEmitter } from 'events';
+
+// Create a mock process object that will be returned by spawn
+function createMockProcess() {
+ return {
+ stdout: { on: vi.fn() },
+ stderr: { on: vi.fn() },
+ on: vi.fn((event: string, callback: any) => {
+ if (event === 'exit') {
+ // Simulate immediate exit with code 0
+ setTimeout(() => callback(0), 10);
+ }
+ }),
+ kill: vi.fn()
+ };
+}
+
+// Mock child_process - must be BEFORE imports of modules that use it
+const spawnCalls: Array<{ command: string; args: string[]; options: { env: Record<string, string>; cwd?: string; [key: string]: unknown } }> = [];
+
+vi.mock('child_process', async (importOriginal) => {
+ const actual = await importOriginal();
+ const mockSpawn = vi.fn((command: string, args: string[], options: { env: Record<string, string>; cwd?: string; [key: string]: unknown }) => {
+ // Record the call for test assertions
+ spawnCalls.push({ command, args, options });
+ return createMockProcess();
+ });
+
+ return {
+ ...actual,
+ spawn: mockSpawn,
+ execSync: vi.fn((command: string) => {
+ if (command.includes('git')) {
+ return '/fake/path';
+ }
+ return '';
+ })
+ };
+});
+
+// Mock project-initializer to avoid child_process.execSync issues
+vi.mock('../project-initializer', () => ({
+ getAutoBuildPath: vi.fn(() => '/fake/auto-build'),
+ isInitialized: vi.fn(() => true),
+ initializeProject: vi.fn(),
+ getProjectStorePath: vi.fn(() => '/fake/store/path')
+}));
+
+// Mock project-store BEFORE agent-process imports it
+vi.mock('../project-store', () => ({
+ projectStore: {
+ getProject: vi.fn(),
+ listProjects: vi.fn(),
+ createProject: vi.fn(),
+ updateProject: vi.fn(),
+ deleteProject: vi.fn(),
+ getProjectSettings: vi.fn(),
+ updateProjectSettings: vi.fn()
+ }
+}));
+
+// Mock claude-profile-manager
+vi.mock('../claude-profile-manager', () => ({
+ getClaudeProfileManager: vi.fn(() => ({
+ getProfilePath: vi.fn(() => '/fake/profile/path'),
+ ensureProfileDir: vi.fn(),
+ readProfile: vi.fn(),
+ writeProfile: vi.fn(),
+ deleteProfile: vi.fn()
+ }))
+}));
+
+// Mock dependencies
+vi.mock('../services/profile', () => ({
+ getAPIProfileEnv: vi.fn()
+}));
+
+vi.mock('../rate-limit-detector', () => ({
+ getProfileEnv: vi.fn(() => ({})),
+ detectRateLimit: vi.fn(() => ({ isRateLimited: false })),
+ createSDKRateLimitInfo: vi.fn(),
+ detectAuthFailure: vi.fn(() => ({ isAuthFailure: false }))
+}));
+
+vi.mock('../python-detector', () => ({
+ findPythonCommand: vi.fn(() => 'python'),
+ parsePythonCommand: vi.fn(() => ['python', []])
+}));
+
+vi.mock('electron', () => ({
+ app: {
+ getAppPath: vi.fn(() => '/fake/app/path')
+ }
+}));
+
+// Import AFTER all mocks are set up
+import { AgentProcessManager } from './agent-process';
+import { AgentState } from './agent-state';
+import { AgentEvents } from './agent-events';
+import * as profileService from '../services/profile';
+import * as rateLimitDetector from '../rate-limit-detector';
+
+describe('AgentProcessManager - API Profile Env Injection (Story 2.3)', () => {
+ let processManager: AgentProcessManager;
+ let state: AgentState;
+ let events: AgentEvents;
+ let emitter: EventEmitter;
+
+ beforeEach(() => {
+ // Reset all mocks and spawn calls
+ vi.clearAllMocks();
+ spawnCalls.length = 0;
+
+ // Clear environment variables that could interfere with tests
+ delete process.env.ANTHROPIC_AUTH_TOKEN;
+ delete process.env.ANTHROPIC_BASE_URL;
+ delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
+
+ // Initialize components
+ state = new AgentState();
+ events = new AgentEvents();
+ emitter = new EventEmitter();
+ processManager = new AgentProcessManager(state, events, emitter);
+ });
+
+ afterEach(() => {
+ processManager.killAllProcesses();
+ });
+
+ describe('AC1: API Profile Env Var Injection', () => {
+ it('should inject ANTHROPIC_BASE_URL when active profile has baseUrl', async () => {
+ const mockApiProfileEnv = {
+ ANTHROPIC_BASE_URL: 'https://custom.api.com',
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ expect(spawnCalls).toHaveLength(1);
+ expect(spawnCalls[0].command).toBe('python');
+ expect(spawnCalls[0].args).toContain('run.py');
+ expect(spawnCalls[0].options.env).toMatchObject({
+ ANTHROPIC_BASE_URL: 'https://custom.api.com',
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key'
+ });
+ });
+
+ it('should inject ANTHROPIC_AUTH_TOKEN when active profile has apiKey', async () => {
+ const mockApiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-custom-key-12345678'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ expect(spawnCalls).toHaveLength(1);
+ expect(spawnCalls[0].options.env.ANTHROPIC_AUTH_TOKEN).toBe('sk-custom-key-12345678');
+ });
+
+ it('should inject model env vars when active profile has models configured', async () => {
+ const mockApiProfileEnv = {
+ ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ expect(spawnCalls).toHaveLength(1);
+ expect(spawnCalls[0].options.env).toMatchObject({
+ ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022'
+ });
+ });
+
+ it('should give API profile env vars highest precedence over extraEnv', async () => {
+ const extraEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-extra-token',
+ ANTHROPIC_BASE_URL: 'https://extra.com'
+ };
+
+ const mockApiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-profile-token',
+ ANTHROPIC_BASE_URL: 'https://profile.com'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], extraEnv, 'task-execution');
+
+ expect(spawnCalls).toHaveLength(1);
+ // API profile should override extraEnv
+ expect(spawnCalls[0].options.env.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-token');
+ expect(spawnCalls[0].options.env.ANTHROPIC_BASE_URL).toBe('https://profile.com');
+ });
+ });
+
+ describe('AC2: OAuth Mode (No Active Profile)', () => {
+ let originalEnv: NodeJS.ProcessEnv;
+
+ beforeEach(() => {
+ // Save original environment before each test
+ originalEnv = { ...process.env };
+ });
+
+ afterEach(() => {
+ // Restore original environment after each test
+ process.env = originalEnv;
+ });
+
+ it('should NOT set ANTHROPIC_AUTH_TOKEN when no active profile (OAuth mode)', async () => {
+ // Return empty object = OAuth mode
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({});
+
+ // Set OAuth token via getProfileEnv (existing flow)
+ vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({
+ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-123'
+ });
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ expect(spawnCalls).toHaveLength(1);
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+ expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-123');
+ // OAuth mode clears ANTHROPIC_AUTH_TOKEN with empty string (not undefined)
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('');
+ });
+
+ it('should return empty object from getAPIProfileEnv when activeProfileId is null', async () => {
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({});
+
+ const result = await profileService.getAPIProfileEnv();
+ expect(result).toEqual({});
+ });
+
+ it('should clear stale ANTHROPIC_AUTH_TOKEN from process.env when switching to OAuth mode', async () => {
+ // Simulate process.env having stale ANTHROPIC_* vars from previous session
+ process.env = {
+ ...originalEnv,
+ ANTHROPIC_AUTH_TOKEN: 'stale-token-from-env',
+ ANTHROPIC_BASE_URL: 'https://stale.example.com'
+ };
+
+ // OAuth mode - no active API profile
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({});
+
+ // Set OAuth token
+ vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({
+ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-456'
+ });
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // OAuth token should be present
+ expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-456');
+
+ // Stale ANTHROPIC_* vars should be cleared (empty string overrides process.env)
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('');
+ expect(envArg.ANTHROPIC_BASE_URL).toBe('');
+ });
+
+ it('should clear stale ANTHROPIC_BASE_URL when switching to OAuth mode', async () => {
+ process.env = {
+ ...originalEnv,
+ ANTHROPIC_BASE_URL: 'https://old-custom-endpoint.com'
+ };
+
+ // OAuth mode
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({});
+ vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue({
+ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-789'
+ });
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // Should clear the base URL (so Python uses default api.anthropic.com)
+ expect(envArg.ANTHROPIC_BASE_URL).toBe('');
+ expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-789');
+ });
+
+ it('should NOT clear ANTHROPIC_* vars when API Profile is active', async () => {
+ process.env = {
+ ...originalEnv,
+ ANTHROPIC_AUTH_TOKEN: 'old-token-in-env'
+ };
+
+ // API Profile mode - active profile
+ const mockApiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-profile-active',
+ ANTHROPIC_BASE_URL: 'https://active-profile.com'
+ };
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // Should use API profile vars, NOT clear them
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-active');
+ expect(envArg.ANTHROPIC_BASE_URL).toBe('https://active-profile.com');
+ });
+ });
+
+ describe('AC4: No API Key Logging', () => {
+ it('should never log full API keys in spawn env vars', async () => {
+ const mockApiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-sensitive-api-key-12345678',
+ ANTHROPIC_BASE_URL: 'https://api.example.com'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ // Mock ALL console methods to capture any debug/error output
+ const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
+ const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
+ const consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
+ const consoleDebugSpy = vi.spyOn(console, 'debug').mockImplementation(() => {});
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ // Get the env object passed to spawn
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // Verify the full API key is in the env (for Python subprocess)
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-sensitive-api-key-12345678');
+
+ // Collect ALL console output from all methods
+ const allLogCalls = [
+ ...consoleLogSpy.mock.calls,
+ ...consoleErrorSpy.mock.calls,
+ ...consoleWarnSpy.mock.calls,
+ ...consoleDebugSpy.mock.calls
+ ].flatMap(call => call.map(String));
+ const logString = JSON.stringify(allLogCalls);
+
+ // The full API key should NOT appear in any logs (AC4 compliance)
+ expect(logString).not.toContain('sk-sensitive-api-key-12345678');
+
+ // Restore all spies
+ consoleLogSpy.mockRestore();
+ consoleErrorSpy.mockRestore();
+ consoleWarnSpy.mockRestore();
+ consoleDebugSpy.mockRestore();
+ });
+
+ it('should not log API key even in error scenarios', async () => {
+ const mockApiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-secret-key-for-error-test',
+ ANTHROPIC_BASE_URL: 'https://api.example.com'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(mockApiProfileEnv);
+
+ // Mock console methods
+ const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
+ const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ // Collect all error and log output
+ const allOutput = [
+ ...consoleErrorSpy.mock.calls,
+ ...consoleLogSpy.mock.calls
+ ].flatMap(call => call.map(arg => typeof arg === 'object' ? JSON.stringify(arg) : String(arg)));
+ const outputString = allOutput.join(' ');
+
+ // Verify API key is never exposed in logs
+ expect(outputString).not.toContain('sk-secret-key-for-error-test');
+
+ consoleErrorSpy.mockRestore();
+ consoleLogSpy.mockRestore();
+ });
+ });
+
+ describe('AC3: Profile Switching Between Builds', () => {
+ it('should allow different profiles for different spawn calls', async () => {
+ // First spawn with Profile A
+ const profileAEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-profile-a',
+ ANTHROPIC_BASE_URL: 'https://api-a.com'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValueOnce(profileAEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ const firstEnv = spawnCalls[0].options.env as Record<string, string>;
+ expect(firstEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-a');
+
+ // Second spawn with Profile B (user switched active profile)
+ const profileBEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-profile-b',
+ ANTHROPIC_BASE_URL: 'https://api-b.com'
+ };
+
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValueOnce(profileBEnv);
+
+ await processManager.spawnProcess('task-2', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ const secondEnv = spawnCalls[1].options.env as Record<string, string>;
+ expect(secondEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-b');
+
+ // Verify first spawn's env is NOT affected by second spawn
+ expect(firstEnv.ANTHROPIC_AUTH_TOKEN).toBe('sk-profile-a');
+ });
+ });
+
+ describe('Integration: Combined env precedence', () => {
+ it('should merge env vars in correct precedence order', async () => {
+ const extraEnv = {
+ CUSTOM_VAR: 'from-extra'
+ };
+
+ const profileEnv = {
+ CLAUDE_CONFIG_DIR: '/custom/config'
+ };
+
+ const apiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-api-profile',
+ ANTHROPIC_BASE_URL: 'https://api-profile.com'
+ };
+
+ vi.mocked(rateLimitDetector.getProfileEnv).mockReturnValue(profileEnv);
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue(apiProfileEnv);
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], extraEnv, 'task-execution');
+
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // Verify all sources are included
+ expect(envArg.CUSTOM_VAR).toBe('from-extra'); // From extraEnv
+ expect(envArg.CLAUDE_CONFIG_DIR).toBe('/custom/config'); // From profileEnv
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('sk-api-profile'); // From apiProfileEnv (highest for ANTHROPIC_*)
+
+ // Verify standard Python env vars
+ expect(envArg.PYTHONUNBUFFERED).toBe('1');
+ expect(envArg.PYTHONIOENCODING).toBe('utf-8');
+ expect(envArg.PYTHONUTF8).toBe('1');
+ });
+
+ it('should call getOAuthModeClearVars and apply clearing when in OAuth mode', async () => {
+ // OAuth mode - empty API profile
+ vi.mocked(profileService.getAPIProfileEnv).mockResolvedValue({});
+
+ await processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution');
+
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // Verify clearing vars are applied (empty strings for ANTHROPIC_* vars)
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('');
+ expect(envArg.ANTHROPIC_BASE_URL).toBe('');
+ expect(envArg.ANTHROPIC_MODEL).toBe('');
+ expect(envArg.ANTHROPIC_DEFAULT_HAIKU_MODEL).toBe('');
+ expect(envArg.ANTHROPIC_DEFAULT_SONNET_MODEL).toBe('');
+ expect(envArg.ANTHROPIC_DEFAULT_OPUS_MODEL).toBe('');
+ });
+
+ it('should handle getAPIProfileEnv errors gracefully', async () => {
+ // Simulate service error
+ vi.mocked(profileService.getAPIProfileEnv).mockRejectedValue(new Error('Service unavailable'));
+
+ // Should not throw - should fall back to OAuth mode
+ await expect(
+ processManager.spawnProcess('task-1', '/fake/cwd', ['run.py'], {}, 'task-execution')
+ ).resolves.not.toThrow();
+
+ const envArg = spawnCalls[0].options.env as Record<string, string>;
+
+ // Should have clearing vars (falls back to OAuth mode on error)
+ expect(envArg.ANTHROPIC_AUTH_TOKEN).toBe('');
+ expect(envArg.ANTHROPIC_BASE_URL).toBe('');
+ });
+ });
+});
diff --git a/apps/frontend/src/main/agent/agent-process.ts b/apps/frontend/src/main/agent/agent-process.ts
index ef045555c0..03010bf959 100644
--- a/apps/frontend/src/main/agent/agent-process.ts
+++ b/apps/frontend/src/main/agent/agent-process.ts
@@ -7,6 +7,7 @@ import { AgentState } from './agent-state';
import { AgentEvents } from './agent-events';
import { ProcessType, ExecutionProgressData } from './types';
import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv, detectAuthFailure } from '../rate-limit-detector';
+import { getAPIProfileEnv } from '../services/profile';
import { projectStore } from '../project-store';
import { getClaudeProfileManager } from '../claude-profile-manager';
import { parsePythonCommand, validatePythonPath } from '../python-detector';
@@ -14,6 +15,64 @@ import { pythonEnvManager, getConfiguredPythonPath } from '../python-env-manager
import { buildMemoryEnvVars } from '../memory-env-builder';
import { readSettingsFile } from '../settings-utils';
import type { AppSettings } from '../../shared/types/settings';
+import { getOAuthModeClearVars } from './env-utils';
+import { getAugmentedEnv } from '../env-utils';
+import { getToolInfo } from '../cli-tool-manager';
+
+
+function deriveGitBashPath(gitExePath: string): string | null {
+ if (process.platform !== 'win32') {
+ return null;
+ }
+
+ try {
+ const gitDir = path.dirname(gitExePath); // e.g., D:\...\Git\mingw64\bin
+ const gitDirName = path.basename(gitDir).toLowerCase();
+
+ // Find Git installation root
+ let gitRoot: string;
+
+ if (gitDirName === 'cmd') {
+ // .../Git/cmd/git.exe -> .../Git
+ gitRoot = path.dirname(gitDir);
+ } else if (gitDirName === 'bin') {
+ // Could be .../Git/bin/git.exe OR .../Git/mingw64/bin/git.exe
+ const parent = path.dirname(gitDir);
+ const parentName = path.basename(parent).toLowerCase();
+ if (parentName === 'mingw64' || parentName === 'mingw32') {
+ // .../Git/mingw64/bin/git.exe -> .../Git
+ gitRoot = path.dirname(parent);
+ } else {
+ // .../Git/bin/git.exe -> .../Git
+ gitRoot = parent;
+ }
+ } else {
+ // Unknown structure - try to find 'bin' sibling
+ gitRoot = path.dirname(gitDir);
+ }
+
+ // Bash.exe is in Git/bin/bash.exe
+ const bashPath = path.join(gitRoot, 'bin', 'bash.exe');
+
+ if (existsSync(bashPath)) {
+ console.log('[AgentProcess] Derived git-bash path:', bashPath);
+ return bashPath;
+ }
+
+ // Fallback: check one level up if gitRoot didn't work
+ const altBashPath = path.join(path.dirname(gitRoot), 'bin', 'bash.exe');
+ if (existsSync(altBashPath)) {
+ console.log('[AgentProcess] Found git-bash at alternate path:', altBashPath);
+ return altBashPath;
+ }
+
+ console.warn('[AgentProcess] Could not find bash.exe from git path:', gitExePath);
+ return null;
+ } catch (error) {
+ console.error('[AgentProcess] Error deriving git-bash path:', error);
+ return null;
+ }
+}
/**
* Process spawning and lifecycle management
@@ -53,8 +112,31 @@ export class AgentProcessManager {
extraEnv: Record<string, string>
): NodeJS.ProcessEnv {
const profileEnv = getProfileEnv();
+ // Use getAugmentedEnv() to ensure common tool paths (dotnet, homebrew, etc.)
+ // are available even when app is launched from Finder/Dock
+ const augmentedEnv = getAugmentedEnv();
+
+ // On Windows, detect and pass git-bash path for Claude Code CLI
+ // Electron can detect git via where.exe, but Python subprocess may not have the same PATH
+ const gitBashEnv: Record<string, string> = {};
+ if (process.platform === 'win32' && !process.env.CLAUDE_CODE_GIT_BASH_PATH) {
+ try {
+ const gitInfo = getToolInfo('git');
+ if (gitInfo.found && gitInfo.path) {
+ const bashPath = deriveGitBashPath(gitInfo.path);
+ if (bashPath) {
+ gitBashEnv['CLAUDE_CODE_GIT_BASH_PATH'] = bashPath;
+ console.log('[AgentProcess] Setting CLAUDE_CODE_GIT_BASH_PATH:', bashPath);
+ }
+ }
+ } catch (error) {
+ console.warn('[AgentProcess] Failed to detect git-bash path:', error);
+ }
+ }
+
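+ // Later spreads take precedence: extraEnv and profileEnv can override entries from the augmented base env and gitBashEnv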
return {
- ...process.env,
+ ...augmentedEnv,
+ ...gitBashEnv,
...extraEnv,
...profileEnv,
PYTHONUNBUFFERED: '1',
@@ -195,6 +277,8 @@ export class AgentProcessManager {
// Auto-detect from app location (configured path was invalid or not set)
const possiblePaths = [
+ // Packaged app: backend is in extraResources (process.resourcesPath/backend)
+ ...(app.isPackaged ? [path.join(process.resourcesPath, 'backend')] : []),
// Dev mode: from dist/main -> ../../backend (apps/frontend/out/main -> apps/backend)
path.resolve(__dirname, '..', '..', '..', 'backend'),
// Alternative: from app root -> apps/backend
@@ -238,19 +322,10 @@ export class AgentProcessManager {
}
/**
- * Load environment variables from project's .auto-claude/.env file
- * This contains frontend-configured settings like memory/Graphiti configuration
+ * Parse environment variables from the contents of a .env file.
+ * Filters out empty values to prevent overriding valid tokens from profiles.
*/
- private loadProjectEnv(projectPath: string): Record<string, string> {
- // Find project by path to get autoBuildPath
- const projects = projectStore.getProjects();
- const project = projects.find((p) => p.path === projectPath);
-
- if (!project?.autoBuildPath) {
- return {};
- }
-
- const envPath = path.join(projectPath, project.autoBuildPath, '.env');
+ private parseEnvFile(envPath: string): Record<string, string> {
if (!existsSync(envPath)) {
return {};
}
@@ -274,11 +349,14 @@ export class AgentProcessManager {
// Remove quotes if present
if ((value.startsWith('"') && value.endsWith('"')) ||
- (value.startsWith("'") && value.endsWith("'"))) {
+ (value.startsWith("'") && value.endsWith("'"))) {
value = value.slice(1, -1);
}
- envVars[key] = value;
+ // Skip empty values to prevent overriding valid values from other sources
+ if (value) {
+ envVars[key] = value;
+ }
}
}
@@ -288,6 +366,23 @@ export class AgentProcessManager {
}
}
+ /**
+ * Load environment variables from project's .auto-claude/.env file
+ * This contains frontend-configured settings like memory/Graphiti configuration
+ */
+ private loadProjectEnv(projectPath: string): Record<string, string> {
+ // Find project by path to get autoBuildPath
+ const projects = projectStore.getProjects();
+ const project = projects.find((p) => p.path === projectPath);
+
+ if (!project?.autoBuildPath) {
+ return {};
+ }
+
+ const envPath = path.join(projectPath, project.autoBuildPath, '.env');
+ return this.parseEnvFile(envPath);
+ }
+
/**
* Load environment variables from auto-claude .env file
*/
@@ -298,50 +393,19 @@ export class AgentProcessManager {
}
const envPath = path.join(autoBuildSource, '.env');
- if (!existsSync(envPath)) {
- return {};
- }
-
- try {
- const envContent = readFileSync(envPath, 'utf-8');
- const envVars: Record<string, string> = {};
-
- // Handle both Unix (\n) and Windows (\r\n) line endings
- for (const line of envContent.split(/\r?\n/)) {
- const trimmed = line.trim();
- // Skip comments and empty lines
- if (!trimmed || trimmed.startsWith('#')) {
- continue;
- }
-
- const eqIndex = trimmed.indexOf('=');
- if (eqIndex > 0) {
- const key = trimmed.substring(0, eqIndex).trim();
- let value = trimmed.substring(eqIndex + 1).trim();
-
- // Remove quotes if present
- if ((value.startsWith('"') && value.endsWith('"')) ||
- (value.startsWith("'") && value.endsWith("'"))) {
- value = value.slice(1, -1);
- }
-
- envVars[key] = value;
- }
- }
-
- return envVars;
- } catch {
- return {};
- }
+ return this.parseEnvFile(envPath);
}
- spawnProcess(
+ /**
+ * Spawn a Python process for task execution
+ */
+ async spawnProcess(
taskId: string,
cwd: string,
args: string[],
extraEnv: Record<string, string> = {},
processType: ProcessType = 'task-execution'
- ): void {
+ ): Promise<void> {
const isSpecRunner = processType === 'spec-creation';
this.killProcess(taskId);
@@ -351,13 +415,27 @@ export class AgentProcessManager {
// Get Python environment (PYTHONPATH for bundled packages, etc.)
const pythonEnv = pythonEnvManager.getPythonEnv();
- // Parse Python command to handle space-separated commands like "py -3"
+ // Get active API profile environment variables
+ let apiProfileEnv: Record<string, string> = {};
+ try {
+ apiProfileEnv = await getAPIProfileEnv();
+ } catch (error) {
+ console.error('[Agent Process] Failed to get API profile env:', error);
+ // Continue with empty profile env (falls back to OAuth mode)
+ }
+
+ // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode)
+ const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+
+ // Parse Python command to handle space-separated commands like "py -3"
const [pythonCommand, pythonBaseArgs] = parsePythonCommand(this.getPythonPath());
const childProcess = spawn(pythonCommand, [...pythonBaseArgs, ...args], {
cwd,
env: {
...env, // Already includes process.env, extraEnv, profileEnv, PYTHONUNBUFFERED, PYTHONUTF8
- ...pythonEnv // Include Python environment (PYTHONPATH for bundled packages)
+ ...pythonEnv, // Include Python environment (PYTHONPATH for bundled packages)
+ ...oauthModeClearVars, // Clear stale ANTHROPIC_* vars when in OAuth mode
+ ...apiProfileEnv // Include active API profile config (highest priority for ANTHROPIC_* vars)
}
});
diff --git a/apps/frontend/src/main/agent/agent-queue.ts b/apps/frontend/src/main/agent/agent-queue.ts
index 913290b35c..1d18be761b 100644
--- a/apps/frontend/src/main/agent/agent-queue.ts
+++ b/apps/frontend/src/main/agent/agent-queue.ts
@@ -7,8 +7,9 @@ import { AgentEvents } from './agent-events';
import { AgentProcessManager } from './agent-process';
import { RoadmapConfig } from './types';
import type { IdeationConfig, Idea } from '../../shared/types';
-import { MODEL_ID_MAP } from '../../shared/constants';
import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv } from '../rate-limit-detector';
+import { getAPIProfileEnv } from '../services/profile';
+import { getOAuthModeClearVars } from './env-utils';
import { debugLog, debugError } from '../../shared/utils/debug-logger';
import { parsePythonCommand } from '../python-detector';
import { pythonEnvManager } from '../python-env-manager';
@@ -37,6 +38,40 @@ export class AgentQueueManager {
this.emitter = emitter;
}
+ /**
+ * Ensure Python environment is ready before spawning processes.
+ * Prevents the race condition where generation starts before dependencies are installed,
+ * which would cause it to fall back to system Python and fail with ModuleNotFoundError.
+ *
+ * @param projectId - The project ID for error event emission
+ * @param eventType - The error event type to emit on failure
+ * @returns true if environment is ready, false if initialization failed (error already emitted)
+ */
+ private async ensurePythonEnvReady(
+ projectId: string,
+ eventType: 'ideation-error' | 'roadmap-error'
+ ): Promise<boolean> {
+ const autoBuildSource = this.processManager.getAutoBuildSourcePath();
+
+ if (!pythonEnvManager.isEnvReady()) {
+ debugLog('[Agent Queue] Python environment not ready, waiting for initialization...');
+ if (autoBuildSource) {
+ const status = await pythonEnvManager.initialize(autoBuildSource);
+ if (!status.ready) {
+ debugError('[Agent Queue] Python environment initialization failed:', status.error);
+ this.emitter.emit(eventType, projectId, `Python environment not ready: ${status.error || 'initialization failed'}`);
+ return false;
+ }
+ debugLog('[Agent Queue] Python environment now ready');
+ } else {
+ debugError('[Agent Queue] Cannot initialize Python - auto-build source not found');
+ this.emitter.emit(eventType, projectId, 'Python environment not ready: auto-build source not found');
+ return false;
+ }
+ }
+ return true;
+ }
+
/**
* Start roadmap generation process
*
@@ -44,14 +79,14 @@ export class AgentQueueManager {
* This allows refreshing competitor data independently of the general roadmap refresh.
* Use when user explicitly wants new competitor research.
*/
- startRoadmapGeneration(
+ async startRoadmapGeneration(
projectId: string,
projectPath: string,
refresh: boolean = false,
enableCompetitorAnalysis: boolean = false,
refreshCompetitorAnalysis: boolean = false,
config?: RoadmapConfig
- ): void {
+ ): Promise<void> {
debugLog('[Agent Queue] Starting roadmap generation:', {
projectId,
projectPath,
@@ -94,9 +129,9 @@ export class AgentQueueManager {
}
// Add model and thinking level from config
+ // Pass shorthand (opus/sonnet/haiku) - backend resolves using API profile env vars
if (config?.model) {
- const modelId = MODEL_ID_MAP[config.model] || MODEL_ID_MAP['opus'];
- args.push('--model', modelId);
+ args.push('--model', config.model);
}
if (config?.thinkingLevel) {
args.push('--thinking-level', config.thinkingLevel);
@@ -105,18 +140,18 @@ export class AgentQueueManager {
debugLog('[Agent Queue] Spawning roadmap process with args:', args);
// Use projectId as taskId for roadmap operations
- this.spawnRoadmapProcess(projectId, projectPath, args);
+ await this.spawnRoadmapProcess(projectId, projectPath, args);
}
/**
* Start ideation generation process
*/
- startIdeationGeneration(
+ async startIdeationGeneration(
projectId: string,
projectPath: string,
config: IdeationConfig,
refresh: boolean = false
- ): void {
+ ): Promise<void> {
debugLog('[Agent Queue] Starting ideation generation:', {
projectId,
projectPath,
@@ -170,9 +205,9 @@ export class AgentQueueManager {
}
// Add model and thinking level from config
+ // Pass shorthand (opus/sonnet/haiku) - backend resolves using API profile env vars
if (config.model) {
- const modelId = MODEL_ID_MAP[config.model] || MODEL_ID_MAP['opus'];
- args.push('--model', modelId);
+ args.push('--model', config.model);
}
if (config.thinkingLevel) {
args.push('--thinking-level', config.thinkingLevel);
@@ -181,19 +216,28 @@ export class AgentQueueManager {
debugLog('[Agent Queue] Spawning ideation process with args:', args);
// Use projectId as taskId for ideation operations
- this.spawnIdeationProcess(projectId, projectPath, args);
+ await this.spawnIdeationProcess(projectId, projectPath, args);
}
/**
* Spawn a Python process for ideation generation
*/
- private spawnIdeationProcess(
+ private async spawnIdeationProcess(
projectId: string,
projectPath: string,
args: string[]
- ): void {
+ ): Promise<void> {
debugLog('[Agent Queue] Spawning ideation process:', { projectId, projectPath });
+ // Run from auto-claude source directory so imports work correctly
+ const autoBuildSource = this.processManager.getAutoBuildSourcePath();
+ const cwd = autoBuildSource || process.cwd();
+
+ // Ensure Python environment is ready before spawning
+ if (!await this.ensurePythonEnvReady(projectId, 'ideation-error')) {
+ return;
+ }
+
// Kill existing process for this project if any
const wasKilled = this.processManager.killProcess(projectId);
if (wasKilled) {
@@ -204,9 +248,6 @@ export class AgentQueueManager {
const spawnId = this.state.generateSpawnId();
debugLog('[Agent Queue] Generated spawn ID:', spawnId);
- // Run from auto-claude source directory so imports work correctly
- const autoBuildSource = this.processManager.getAutoBuildSourcePath();
- const cwd = autoBuildSource || process.cwd();
// Get combined environment variables
const combinedEnv = this.processManager.getCombinedEnv(projectPath);
@@ -214,6 +255,12 @@ export class AgentQueueManager {
// Get active Claude profile environment (CLAUDE_CODE_OAUTH_TOKEN if not default)
const profileEnv = getProfileEnv();
+ // Get active API profile environment variables
+ const apiProfileEnv = await getAPIProfileEnv();
+
+ // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode)
+ const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+
// Get Python path from process manager (uses venv if configured)
const pythonPath = this.processManager.getPythonPath();
@@ -234,28 +281,30 @@ export class AgentQueueManager {
// 1. process.env (system)
// 2. pythonEnv (bundled packages environment)
// 3. combinedEnv (auto-claude/.env for CLI usage)
- // 4. profileEnv (Electron app OAuth token - highest priority)
- // 5. Our specific overrides
+ // 4. oauthModeClearVars (clear stale ANTHROPIC_* vars when in OAuth mode)
+ // 5. profileEnv (Electron app OAuth token)
+ // 6. apiProfileEnv (Active API profile config - highest priority for ANTHROPIC_* vars)
+ // 7. Our specific overrides
const finalEnv = {
...process.env,
...pythonEnv,
...combinedEnv,
+ ...oauthModeClearVars,
...profileEnv,
+ ...apiProfileEnv,
PYTHONPATH: combinedPythonPath,
PYTHONUNBUFFERED: '1',
PYTHONUTF8: '1'
};
- // Debug: Show OAuth token source
+ // Debug: Show OAuth token source (token values intentionally omitted for security - AC4)
const tokenSource = profileEnv['CLAUDE_CODE_OAUTH_TOKEN']
? 'Electron app profile'
: (combinedEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'auto-claude/.env' : 'not found');
- const oauthToken = (finalEnv as Record<string, string | undefined>)['CLAUDE_CODE_OAUTH_TOKEN'];
- const hasToken = !!oauthToken;
+ const hasToken = !!(finalEnv as Record<string, string | undefined>)['CLAUDE_CODE_OAUTH_TOKEN'];
debugLog('[Agent Queue] OAuth token status:', {
source: tokenSource,
- hasToken,
- tokenPreview: hasToken ? oauthToken?.substring(0, 20) + '...' : 'none'
+ hasToken
});
// Parse Python command to handle space-separated commands like "py -3"
@@ -500,13 +549,22 @@ export class AgentQueueManager {
/**
* Spawn a Python process for roadmap generation
*/
- private spawnRoadmapProcess(
+ private async spawnRoadmapProcess(
projectId: string,
projectPath: string,
args: string[]
- ): void {
+ ): Promise<void> {
debugLog('[Agent Queue] Spawning roadmap process:', { projectId, projectPath });
+ // Run from auto-claude source directory so imports work correctly
+ const autoBuildSource = this.processManager.getAutoBuildSourcePath();
+ const cwd = autoBuildSource || process.cwd();
+
+ // Ensure Python environment is ready before spawning
+ if (!await this.ensurePythonEnvReady(projectId, 'roadmap-error')) {
+ return;
+ }
+
// Kill existing process for this project if any
const wasKilled = this.processManager.killProcess(projectId);
if (wasKilled) {
@@ -517,9 +575,6 @@ export class AgentQueueManager {
const spawnId = this.state.generateSpawnId();
debugLog('[Agent Queue] Generated roadmap spawn ID:', spawnId);
- // Run from auto-claude source directory so imports work correctly
- const autoBuildSource = this.processManager.getAutoBuildSourcePath();
- const cwd = autoBuildSource || process.cwd();
// Get combined environment variables
const combinedEnv = this.processManager.getCombinedEnv(projectPath);
@@ -527,6 +582,12 @@ export class AgentQueueManager {
// Get active Claude profile environment (CLAUDE_CODE_OAUTH_TOKEN if not default)
const profileEnv = getProfileEnv();
+ // Get active API profile environment variables
+ const apiProfileEnv = await getAPIProfileEnv();
+
+ // Get OAuth mode clearing vars (clears stale ANTHROPIC_* vars when in OAuth mode)
+ const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+
// Get Python path from process manager (uses venv if configured)
const pythonPath = this.processManager.getPythonPath();
@@ -547,28 +608,30 @@ export class AgentQueueManager {
// 1. process.env (system)
// 2. pythonEnv (bundled packages environment)
// 3. combinedEnv (auto-claude/.env for CLI usage)
- // 4. profileEnv (Electron app OAuth token - highest priority)
- // 5. Our specific overrides
+ // 4. oauthModeClearVars (clear stale ANTHROPIC_* vars when in OAuth mode)
+ // 5. profileEnv (Electron app OAuth token)
+ // 6. apiProfileEnv (Active API profile config - highest priority for ANTHROPIC_* vars)
+ // 7. Our specific overrides
const finalEnv = {
...process.env,
...pythonEnv,
...combinedEnv,
+ ...oauthModeClearVars,
...profileEnv,
+ ...apiProfileEnv,
PYTHONPATH: combinedPythonPath,
PYTHONUNBUFFERED: '1',
PYTHONUTF8: '1'
};
- // Debug: Show OAuth token source
+ // Debug: Show OAuth token source (token values intentionally omitted for security - AC4)
const tokenSource = profileEnv['CLAUDE_CODE_OAUTH_TOKEN']
? 'Electron app profile'
: (combinedEnv['CLAUDE_CODE_OAUTH_TOKEN'] ? 'auto-claude/.env' : 'not found');
- const oauthToken = (finalEnv as Record<string, string | undefined>)['CLAUDE_CODE_OAUTH_TOKEN'];
- const hasToken = !!oauthToken;
+ const hasToken = !!(finalEnv as Record<string, string | undefined>)['CLAUDE_CODE_OAUTH_TOKEN'];
debugLog('[Agent Queue] OAuth token status:', {
source: tokenSource,
- hasToken,
- tokenPreview: hasToken ? oauthToken?.substring(0, 20) + '...' : 'none'
+ hasToken
});
// Parse Python command to handle space-separated commands like "py -3"
diff --git a/apps/frontend/src/main/agent/env-utils.test.ts b/apps/frontend/src/main/agent/env-utils.test.ts
new file mode 100644
index 0000000000..41f145cf90
--- /dev/null
+++ b/apps/frontend/src/main/agent/env-utils.test.ts
@@ -0,0 +1,163 @@
+/**
+ * Unit tests for env-utils
+ * Tests OAuth mode environment variable clearing functionality
+ */
+
+import { describe, it, expect } from 'vitest';
+import { getOAuthModeClearVars } from './env-utils';
+
+describe('getOAuthModeClearVars', () => {
+ describe('OAuth mode (no active API profile)', () => {
+ it('should return clearing vars when apiProfileEnv is empty', () => {
+ const result = getOAuthModeClearVars({});
+
+ expect(result).toEqual({
+ // Standard Anthropic API vars
+ ANTHROPIC_API_KEY: '',
+ ANTHROPIC_AUTH_TOKEN: '',
+ ANTHROPIC_BASE_URL: '',
+ ANTHROPIC_MODEL: '',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: '',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: '',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: '',
+ // Microsoft Foundry vars
+ CLAUDE_CODE_USE_FOUNDRY: '',
+ ANTHROPIC_FOUNDRY_API_KEY: '',
+ ANTHROPIC_FOUNDRY_BASE_URL: '',
+ ANTHROPIC_FOUNDRY_RESOURCE: ''
+ });
+ });
+
+ it('should clear all ANTHROPIC_* and Foundry environment variables', () => {
+ const result = getOAuthModeClearVars({});
+
+ // Verify all known ANTHROPIC_* vars are cleared
+ expect(result.ANTHROPIC_API_KEY).toBe('');
+ expect(result.ANTHROPIC_AUTH_TOKEN).toBe('');
+ expect(result.ANTHROPIC_BASE_URL).toBe('');
+ expect(result.ANTHROPIC_MODEL).toBe('');
+ expect(result.ANTHROPIC_DEFAULT_HAIKU_MODEL).toBe('');
+ expect(result.ANTHROPIC_DEFAULT_SONNET_MODEL).toBe('');
+ expect(result.ANTHROPIC_DEFAULT_OPUS_MODEL).toBe('');
+ // Verify Foundry vars are cleared
+ expect(result.CLAUDE_CODE_USE_FOUNDRY).toBe('');
+ expect(result.ANTHROPIC_FOUNDRY_API_KEY).toBe('');
+ expect(result.ANTHROPIC_FOUNDRY_BASE_URL).toBe('');
+ expect(result.ANTHROPIC_FOUNDRY_RESOURCE).toBe('');
+ });
+ });
+
+ describe('API Profile mode (active profile)', () => {
+ it('should return empty object when apiProfileEnv has values', () => {
+ const apiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-active-profile',
+ ANTHROPIC_BASE_URL: 'https://custom.api.com'
+ };
+
+ const result = getOAuthModeClearVars(apiProfileEnv);
+
+ expect(result).toEqual({});
+ });
+
+ it('should NOT clear vars when API profile is active', () => {
+ const apiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-test',
+ ANTHROPIC_BASE_URL: 'https://test.com',
+ ANTHROPIC_MODEL: 'claude-3-opus'
+ };
+
+ const result = getOAuthModeClearVars(apiProfileEnv);
+
+ // Should not return any clearing vars
+ expect(Object.keys(result)).toHaveLength(0);
+ });
+
+ it('should detect non-empty profile even with single property', () => {
+ const apiProfileEnv = {
+ ANTHROPIC_AUTH_TOKEN: 'sk-minimal'
+ };
+
+ const result = getOAuthModeClearVars(apiProfileEnv);
+
+ expect(result).toEqual({});
+ });
+ });
+
+ describe('Edge cases', () => {
+ it('should handle undefined gracefully (treat as empty)', () => {
+ // TypeScript should prevent this, but runtime safety
+ const result = getOAuthModeClearVars(undefined as any);
+
+ // Should treat undefined as empty object -> OAuth mode
+ expect(result).toBeDefined();
+ });
+
+ it('should handle null gracefully (treat as empty)', () => {
+ // Runtime safety for null values
+ const result = getOAuthModeClearVars(null as any);
+
+ // Should treat null as OAuth mode and return clearing vars
+ expect(result).toEqual({
+ // Standard Anthropic API vars
+ ANTHROPIC_API_KEY: '',
+ ANTHROPIC_AUTH_TOKEN: '',
+ ANTHROPIC_BASE_URL: '',
+ ANTHROPIC_MODEL: '',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: '',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: '',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: '',
+ // Microsoft Foundry vars
+ CLAUDE_CODE_USE_FOUNDRY: '',
+ ANTHROPIC_FOUNDRY_API_KEY: '',
+ ANTHROPIC_FOUNDRY_BASE_URL: '',
+ ANTHROPIC_FOUNDRY_RESOURCE: ''
+ });
+ });
+
+ it('should return consistent object shape for OAuth mode', () => {
+ const result1 = getOAuthModeClearVars({});
+ const result2 = getOAuthModeClearVars({});
+
+ expect(result1).toEqual(result2);
+ // Use specific expected keys instead of magic number
+ const expectedKeys = [
+ // Standard Anthropic API vars
+ 'ANTHROPIC_API_KEY',
+ 'ANTHROPIC_AUTH_TOKEN',
+ 'ANTHROPIC_BASE_URL',
+ 'ANTHROPIC_MODEL',
+ 'ANTHROPIC_DEFAULT_HAIKU_MODEL',
+ 'ANTHROPIC_DEFAULT_SONNET_MODEL',
+ 'ANTHROPIC_DEFAULT_OPUS_MODEL',
+ // Microsoft Foundry vars
+ 'CLAUDE_CODE_USE_FOUNDRY',
+ 'ANTHROPIC_FOUNDRY_API_KEY',
+ 'ANTHROPIC_FOUNDRY_BASE_URL',
+ 'ANTHROPIC_FOUNDRY_RESOURCE'
+ ];
+ expect(Object.keys(result1).sort()).toEqual(expectedKeys.sort());
+ });
+
+ it('should NOT clear if apiProfileEnv has non-ANTHROPIC keys only', () => {
+ // Edge case: service returns metadata but no ANTHROPIC_* vars
+ const result = getOAuthModeClearVars({ SOME_OTHER_VAR: 'value' });
+
+ // Should treat as OAuth mode since no ANTHROPIC_* keys present
+ expect(result).toEqual({
+ // Standard Anthropic API vars
+ ANTHROPIC_API_KEY: '',
+ ANTHROPIC_AUTH_TOKEN: '',
+ ANTHROPIC_BASE_URL: '',
+ ANTHROPIC_MODEL: '',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: '',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: '',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: '',
+ // Microsoft Foundry vars
+ CLAUDE_CODE_USE_FOUNDRY: '',
+ ANTHROPIC_FOUNDRY_API_KEY: '',
+ ANTHROPIC_FOUNDRY_BASE_URL: '',
+ ANTHROPIC_FOUNDRY_RESOURCE: ''
+ });
+ });
+ });
+});
diff --git a/apps/frontend/src/main/agent/env-utils.ts b/apps/frontend/src/main/agent/env-utils.ts
new file mode 100644
index 0000000000..1f2af94fbe
--- /dev/null
+++ b/apps/frontend/src/main/agent/env-utils.ts
@@ -0,0 +1,54 @@
+/**
+ * Utility functions for managing environment variables in agent spawning
+ */
+
+/**
+ * Get environment variables to clear when in OAuth mode
+ *
+ * When switching from API Profile mode to OAuth mode, residual environment
+ * variables from process.env can cause authentication failures.
+ * This function returns an object with empty strings for these vars when
+ * no API profile is active, ensuring OAuth tokens are used correctly.
+ *
+ * Clears both standard Anthropic API vars and Microsoft Foundry vars.
+ *
+ * **Why empty strings?** Setting environment variables to empty strings (rather than
+ * undefined) ensures they override any stale values from process.env. Python's SDK
+ * treats empty strings as falsy in conditional checks like `if token:`, so empty
+ * strings effectively disable these authentication parameters without leaving
+ * undefined values that might be ignored during object spreading.
+ *
+ * @param apiProfileEnv - Environment variables from getAPIProfileEnv()
+ * @returns Object with empty profile-related vars if in OAuth mode, empty object otherwise
+ */
+export function getOAuthModeClearVars(apiProfileEnv: Record<string, string>): Record<string, string> {
+ // If API profile is active (has ANTHROPIC_* or Foundry vars), don't clear anything
+ if (apiProfileEnv && Object.keys(apiProfileEnv).some(key =>
+ key.startsWith('ANTHROPIC_') || key === 'CLAUDE_CODE_USE_FOUNDRY'
+ )) {
+ return {};
+ }
+
+ // In OAuth mode (no API profile), clear all profile-related vars
+ // Setting to empty string ensures they override any values from process.env
+ // Python's `if token:` checks treat empty strings as falsy
+ //
+ // IMPORTANT: ANTHROPIC_API_KEY is included to prevent Claude Code from using
+ // API keys that may be present in the shell environment instead of OAuth tokens.
+ // Without clearing this, Claude Code would show "Claude API" instead of "Claude Max".
+ return {
+ // Standard Anthropic API vars
+ ANTHROPIC_API_KEY: '',
+ ANTHROPIC_AUTH_TOKEN: '',
+ ANTHROPIC_BASE_URL: '',
+ ANTHROPIC_MODEL: '',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: '',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: '',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: '',
+ // Microsoft Foundry vars
+ CLAUDE_CODE_USE_FOUNDRY: '',
+ ANTHROPIC_FOUNDRY_API_KEY: '',
+ ANTHROPIC_FOUNDRY_BASE_URL: '',
+ ANTHROPIC_FOUNDRY_RESOURCE: ''
+ };
+}
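// Quick usage sketch for the helper above (the inputs are hypothetical examples):
import { getOAuthModeClearVars } from './env-utils';

// No active API profile -> full set of blanking vars is returned (OAuth mode)
const clearAll = getOAuthModeClearVars({});
// clearAll.ANTHROPIC_API_KEY === ''

// Active API profile -> empty object, so the profile's vars pass through untouched
const noClear = getOAuthModeClearVars({ ANTHROPIC_AUTH_TOKEN: 'sk-example' });
// Object.keys(noClear).length === 0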
diff --git a/apps/frontend/src/main/agent/types.ts b/apps/frontend/src/main/agent/types.ts
index fa3c5b8d9d..8d9eb9fcc9 100644
--- a/apps/frontend/src/main/agent/types.ts
+++ b/apps/frontend/src/main/agent/types.ts
@@ -44,6 +44,7 @@ export interface TaskExecutionOptions {
parallel?: boolean;
workers?: number;
baseBranch?: string;
+ useWorktree?: boolean; // If false, use --direct mode (no worktree isolation)
}
export interface SpecCreationMetadata {
@@ -65,6 +66,8 @@ export interface SpecCreationMetadata {
// Non-auto profile - single model and thinking level
model?: 'haiku' | 'sonnet' | 'opus';
thinkingLevel?: 'none' | 'low' | 'medium' | 'high' | 'ultrathink';
+ // Workspace mode - whether to use worktree isolation
+ useWorktree?: boolean; // If false, use --direct mode (no worktree isolation)
}
export interface IdeationProgressData {
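// A minimal sketch of how the new useWorktree flag is intended to map onto the CLI,
// per the comments above ("If false, use --direct mode"); the args-building helper
// here is illustrative, not taken from the codebase.
function buildWorkspaceArgs(options: { useWorktree?: boolean }): string[] {
  // Worktree isolation stays the default; only an explicit false opts out.
  return options.useWorktree === false ? ['--direct'] : [];
}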
diff --git a/apps/frontend/src/main/app-updater.ts b/apps/frontend/src/main/app-updater.ts
index a76444dd3b..98f1f824bf 100644
--- a/apps/frontend/src/main/app-updater.ts
+++ b/apps/frontend/src/main/app-updater.ts
@@ -18,12 +18,16 @@
*/
import { autoUpdater } from 'electron-updater';
-import { app } from 'electron';
+import { app, net } from 'electron';
import type { BrowserWindow } from 'electron';
import { IPC_CHANNELS } from '../shared/constants';
import type { AppUpdateInfo } from '../shared/types';
import { compareVersions } from './updater/version-manager';
+// GitHub repo info for API calls
+const GITHUB_OWNER = 'AndyMik90';
+const GITHUB_REPO = 'Auto-Claude';
+
// Debug mode - DEBUG_UPDATER=true or development mode
const DEBUG_UPDATER = process.env.DEBUG_UPDATER === 'true' || process.env.NODE_ENV === 'development';
@@ -251,3 +255,214 @@ export function quitAndInstall(): void {
export function getCurrentVersion(): string {
return autoUpdater.currentVersion.version;
}
+
+/**
+ * Check if a version string represents a prerelease (beta, alpha, rc, etc.)
+ */
+export function isPrerelease(version: string): boolean {
+ return /-(alpha|beta|rc|dev|canary)\.\d+$/i.test(version) || version.includes('-');
+}
+
+// Timeout for GitHub API requests (10 seconds)
+const GITHUB_API_TIMEOUT = 10000;
+
+/**
+ * Fetch the latest stable release from GitHub API
+ * Returns the latest non-prerelease version
+ */
+async function fetchLatestStableRelease(): Promise<AppUpdateInfo | null> {
+ const fetchPromise = new Promise<AppUpdateInfo | null>((resolve) => {
+ const url = `https://api.github.com/repos/${GITHUB_OWNER}/${GITHUB_REPO}/releases`;
+ console.warn('[app-updater] Fetching releases from:', url);
+
+ const request = net.request({
+ url,
+ method: 'GET'
+ });
+
+ request.setHeader('Accept', 'application/vnd.github.v3+json');
+ request.setHeader('User-Agent', `Auto-Claude/${getCurrentVersion()}`);
+
+ let data = '';
+
+ request.on('response', (response) => {
+ // Validate HTTP status code
+ const statusCode = response.statusCode;
+ if (statusCode !== 200) {
+ // Sanitize statusCode to prevent log injection
+ // Convert to number and validate range to ensure it's a valid HTTP status code
+ const numericCode = Number(statusCode);
+ const safeStatusCode = (Number.isInteger(numericCode) && numericCode >= 100 && numericCode < 600)
+ ? String(numericCode)
+ : 'unknown';
+ console.error(`[app-updater] GitHub API error: HTTP ${safeStatusCode}`);
+ if (statusCode === 403) {
+ console.error('[app-updater] Rate limit may have been exceeded');
+ } else if (statusCode === 404) {
+ console.error('[app-updater] Repository or releases not found');
+ }
+ resolve(null);
+ return;
+ }
+
+ response.on('data', (chunk) => {
+ data += chunk.toString();
+ });
+
+ response.on('end', () => {
+ try {
+ const parsed = JSON.parse(data);
+
+ // Validate response is an array
+ if (!Array.isArray(parsed)) {
+ console.error('[app-updater] Unexpected response format - expected array, got:', typeof parsed);
+ resolve(null);
+ return;
+ }
+
+ const releases = parsed as Array<{
+ tag_name: string;
+ prerelease: boolean;
+ draft: boolean;
+ body?: string;
+ published_at?: string;
+ html_url?: string;
+ }>;
+
+ // Find the first non-prerelease, non-draft release
+ const latestStable = releases.find(r => !r.prerelease && !r.draft);
+
+ if (!latestStable) {
+ console.warn('[app-updater] No stable release found');
+ resolve(null);
+ return;
+ }
+
+ const version = latestStable.tag_name.replace(/^v/, '');
+ // Sanitize version string for logging (remove control characters and limit length)
+ // eslint-disable-next-line no-control-regex
+ const safeVersion = String(version).replace(/[\x00-\x1f\x7f]/g, '').slice(0, 50);
+ console.warn('[app-updater] Found latest stable release:', safeVersion);
+
+ resolve({
+ version,
+ releaseNotes: latestStable.body,
+ releaseDate: latestStable.published_at
+ });
+ } catch (e) {
+ // Sanitize error message for logging (prevent log injection from malformed JSON)
+ const safeError = e instanceof Error ? e.message : 'Unknown parse error';
+ console.error('[app-updater] Failed to parse releases JSON:', safeError);
+ resolve(null);
+ }
+ });
+ });
+
+ request.on('error', (error) => {
+ // Sanitize error message for logging (use only the message property)
+ const safeErrorMessage = error instanceof Error ? error.message : 'Unknown error';
+ console.error('[app-updater] Failed to fetch releases:', safeErrorMessage);
+ resolve(null);
+ });
+
+ request.end();
+ });
+
+ // Add timeout to prevent hanging indefinitely
+ const timeoutPromise = new Promise<null>((resolve) => {
+ setTimeout(() => {
+ console.error(`[app-updater] GitHub API request timed out after ${GITHUB_API_TIMEOUT}ms`);
+ resolve(null);
+ }, GITHUB_API_TIMEOUT);
+ });
+
+ return Promise.race([fetchPromise, timeoutPromise]);
+}
+
+/**
+ * Check if we should offer a downgrade to stable
+ * Called when user disables beta updates while on a prerelease version
+ *
+ * Returns the latest stable version if:
+ * 1. Current version is a prerelease
+ * 2. A stable version exists
+ */
+export async function checkForStableDowngrade(): Promise<AppUpdateInfo | null> {
+ const currentVersion = getCurrentVersion();
+
+ // Only check for downgrade if currently on a prerelease
+ if (!isPrerelease(currentVersion)) {
+ console.warn('[app-updater] Current version is not a prerelease, no downgrade needed');
+ return null;
+ }
+
+ console.warn('[app-updater] Current version is prerelease:', currentVersion);
+ console.warn('[app-updater] Checking for stable version to downgrade to...');
+
+ const latestStable = await fetchLatestStableRelease();
+
+ if (!latestStable) {
+ console.warn('[app-updater] No stable release available for downgrade');
+ return null;
+ }
+
+ console.warn('[app-updater] Stable downgrade available:', latestStable.version);
+ return latestStable;
+}
+
+/**
+ * Set update channel with optional downgrade check
+ * When switching from beta to stable, checks if user should be offered a downgrade
+ *
+ * @param channel - The update channel to switch to
+ * @param triggerDowngradeCheck - Whether to check for stable downgrade (when disabling beta)
+ */
+export async function setUpdateChannelWithDowngradeCheck(
+ channel: UpdateChannel,
+ triggerDowngradeCheck = false
+): Promise<AppUpdateInfo | null> {
+ autoUpdater.channel = channel;
+ console.warn(`[app-updater] Update channel set to: ${channel}`);
+
+ // If switching to stable and downgrade check requested, look for stable version
+ if (channel === 'latest' && triggerDowngradeCheck) {
+ const stableVersion = await checkForStableDowngrade();
+
+ if (stableVersion && mainWindow) {
+ // Notify the renderer about the available stable downgrade
+ mainWindow.webContents.send(IPC_CHANNELS.APP_UPDATE_STABLE_DOWNGRADE, stableVersion);
+ }
+
+ return stableVersion;
+ }
+
+ return null;
+}
+
+/**
+ * Download a specific version (for downgrade)
+ * Uses electron-updater with allowDowngrade enabled to download older stable versions
+ */
+export async function downloadStableVersion(): Promise<void> {
+ // Switch to stable channel
+ autoUpdater.channel = 'latest';
+ // Enable downgrade to allow downloading older versions (e.g., stable when on beta)
+ autoUpdater.allowDowngrade = true;
+ console.warn('[app-updater] Downloading stable version (allowDowngrade=true)...');
+
+ try {
+ // Force a fresh check on the stable channel, then download
+ const result = await autoUpdater.checkForUpdates();
+ if (result) {
+ await autoUpdater.downloadUpdate();
+ } else {
+ throw new Error('No stable version available for download');
+ }
+ } catch (error) {
+ console.error('[app-updater] Failed to download stable version:', error);
+ throw error;
+ } finally {
+ // Reset allowDowngrade to prevent unintended downgrades in normal update checks
+ autoUpdater.allowDowngrade = false;
+ }
+}
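// Illustrative flow for the downgrade path added above; the surrounding handler and
// the userConfirmed callback are assumptions about the caller, not part of app-updater.ts.
// When the user turns beta updates off while on a prerelease, a stable target is looked
// up first and only downloaded after explicit confirmation.
import { setUpdateChannelWithDowngradeCheck, downloadStableVersion } from './app-updater';

async function handleBetaOptOut(userConfirmed: () => Promise<boolean>): Promise<void> {
  const stable = await setUpdateChannelWithDowngradeCheck('latest', true);
  if (stable && await userConfirmed()) {
    await downloadStableVersion(); // temporarily sets allowDowngrade = true, then resets it
  }
}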
diff --git a/apps/frontend/src/main/auto-claude-updater.ts b/apps/frontend/src/main/auto-claude-updater.ts
deleted file mode 100644
index b19e19855e..0000000000
--- a/apps/frontend/src/main/auto-claude-updater.ts
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Auto Claude Source Updater
- *
- * Checks GitHub Releases for updates and downloads them.
- * GitHub Releases are the single source of truth for versioning.
- *
- * Update flow:
- * 1. Check GitHub Releases API for the latest release
- * 2. Compare release tag with current app version
- * 3. If update available, download release tarball and apply
- * 4. Existing project update system handles pushing to individual projects
- *
- * Versioning:
- * - Single source of truth: GitHub Releases
- * - Current version: app.getVersion() (from package.json at build time)
- * - Latest version: Fetched from GitHub Releases API
- * - To release: Create a GitHub release with tag (e.g., v1.2.0)
- */
-
-// Export types
-export type {
- GitHubRelease,
- AutoBuildUpdateCheck,
- AutoBuildUpdateResult,
- UpdateProgressCallback,
- UpdateMetadata
-} from './updater/types';
-
-// Export version management
-export { getBundledVersion, getEffectiveVersion } from './updater/version-manager';
-
-// Export path resolution
-export {
- getBundledSourcePath,
- getEffectiveSourcePath
-} from './updater/path-resolver';
-
-// Export update checking
-export { checkForUpdates } from './updater/update-checker';
-
-// Export update installation
-export { downloadAndApplyUpdate } from './updater/update-installer';
-
-// Export update status
-export {
- hasPendingSourceUpdate,
- getUpdateMetadata
-} from './updater/update-status';
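// With the barrel file above removed, callers presumably import from the updater
// modules directly; an illustrative import using the module paths listed in the
// deleted re-exports:
import { checkForUpdates } from './updater/update-checker';
import { getBundledVersion, getEffectiveVersion } from './updater/version-manager';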
diff --git a/apps/frontend/src/main/changelog/generator.ts b/apps/frontend/src/main/changelog/generator.ts
index c71af9c3d4..6fa75c06fb 100644
--- a/apps/frontend/src/main/changelog/generator.ts
+++ b/apps/frontend/src/main/changelog/generator.ts
@@ -13,6 +13,7 @@ import { extractChangelog } from './parser';
import { getCommits, getBranchDiffCommits } from './git-integration';
import { detectRateLimit, createSDKRateLimitInfo, getProfileEnv } from '../rate-limit-detector';
import { parsePythonCommand } from '../python-detector';
+import { getAugmentedEnv } from '../env-utils';
/**
* Core changelog generation logic
@@ -246,21 +247,9 @@ export class ChangelogGenerator extends EventEmitter {
const homeDir = os.homedir();
const isWindows = process.platform === 'win32';
- // Build PATH with platform-appropriate separator and locations
- const pathAdditions = isWindows
- ? [
- path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude'),
- path.join(homeDir, 'AppData', 'Roaming', 'npm'),
- path.join(homeDir, '.local', 'bin'),
- 'C:\\Program Files\\Claude',
- 'C:\\Program Files (x86)\\Claude'
- ]
- : [
- '/usr/local/bin',
- '/opt/homebrew/bin',
- path.join(homeDir, '.local', 'bin'),
- path.join(homeDir, 'bin')
- ];
+ // Use getAugmentedEnv() to ensure common tool paths are available
+ // even when app is launched from Finder/Dock
+ const augmentedEnv = getAugmentedEnv();
// Get active Claude profile environment (OAuth token preferred, falls back to CLAUDE_CONFIG_DIR)
const profileEnv = getProfileEnv();
@@ -271,15 +260,13 @@ export class ChangelogGenerator extends EventEmitter {
});
const spawnEnv: Record<string, string> = {
- ...process.env as Record<string, string>,
+ ...augmentedEnv,
...this.autoBuildEnv,
...profileEnv, // Include active Claude profile config
// Ensure critical env vars are set for claude CLI
// Use USERPROFILE on Windows, HOME on Unix
...(isWindows ? { USERPROFILE: homeDir } : { HOME: homeDir }),
USER: process.env.USER || process.env.USERNAME || 'user',
- // Add common binary locations to PATH for claude CLI
- PATH: [process.env.PATH || '', ...pathAdditions].filter(Boolean).join(path.delimiter),
PYTHONUNBUFFERED: '1',
PYTHONIOENCODING: 'utf-8',
PYTHONUTF8: '1'
diff --git a/apps/frontend/src/main/changelog/version-suggester.ts b/apps/frontend/src/main/changelog/version-suggester.ts
index 4869fe41ef..6d4a9b9126 100644
--- a/apps/frontend/src/main/changelog/version-suggester.ts
+++ b/apps/frontend/src/main/changelog/version-suggester.ts
@@ -1,9 +1,9 @@
import { spawn } from 'child_process';
-import * as path from 'path';
import * as os from 'os';
import type { GitCommit } from '../../shared/types';
import { getProfileEnv } from '../rate-limit-detector';
import { parsePythonCommand } from '../python-detector';
+import { getAugmentedEnv } from '../env-utils';
interface VersionSuggestion {
version: string;
@@ -215,31 +215,19 @@ except Exception as e:
const homeDir = os.homedir();
const isWindows = process.platform === 'win32';
- // Build PATH with platform-appropriate separator and locations
- const pathAdditions = isWindows
- ? [
- path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude'),
- path.join(homeDir, 'AppData', 'Roaming', 'npm'),
- path.join(homeDir, '.local', 'bin'),
- 'C:\\Program Files\\Claude',
- 'C:\\Program Files (x86)\\Claude'
- ]
- : [
- '/usr/local/bin',
- '/opt/homebrew/bin',
- path.join(homeDir, '.local', 'bin'),
- path.join(homeDir, 'bin')
- ];
+ // Use getAugmentedEnv() to ensure common tool paths are available
+ // even when app is launched from Finder/Dock
+ const augmentedEnv = getAugmentedEnv();
// Get active Claude profile environment
const profileEnv = getProfileEnv();
const spawnEnv: Record<string, string> = {
- ...process.env as Record<string, string>,
+ ...augmentedEnv,
...profileEnv,
+ // Ensure critical env vars are set for claude CLI
...(isWindows ? { USERPROFILE: homeDir } : { HOME: homeDir }),
USER: process.env.USER || process.env.USERNAME || 'user',
- PATH: [process.env.PATH || '', ...pathAdditions].filter(Boolean).join(path.delimiter),
PYTHONUNBUFFERED: '1',
PYTHONIOENCODING: 'utf-8',
PYTHONUTF8: '1'
diff --git a/apps/frontend/src/main/claude-cli-utils.ts b/apps/frontend/src/main/claude-cli-utils.ts
new file mode 100644
index 0000000000..49a0c49c71
--- /dev/null
+++ b/apps/frontend/src/main/claude-cli-utils.ts
@@ -0,0 +1,77 @@
+import path from 'path';
+import { getAugmentedEnv, getAugmentedEnvAsync } from './env-utils';
+import { getToolPath, getToolPathAsync } from './cli-tool-manager';
+
+export type ClaudeCliInvocation = {
+ command: string;
+ env: Record<string, string>;
+};
+
+function ensureCommandDirInPath(command: string, env: Record<string, string>): Record<string, string> {
+ if (!path.isAbsolute(command)) {
+ return env;
+ }
+
+ const pathSeparator = process.platform === 'win32' ? ';' : ':';
+ const commandDir = path.dirname(command);
+ const currentPath = env.PATH || '';
+ const pathEntries = currentPath.split(pathSeparator);
+ const normalizedCommandDir = path.normalize(commandDir);
+ const hasCommandDir = process.platform === 'win32'
+ ? pathEntries
+ .map((entry) => path.normalize(entry).toLowerCase())
+ .includes(normalizedCommandDir.toLowerCase())
+ : pathEntries
+ .map((entry) => path.normalize(entry))
+ .includes(normalizedCommandDir);
+
+ if (hasCommandDir) {
+ return env;
+ }
+
+ return {
+ ...env,
+ PATH: [commandDir, currentPath].filter(Boolean).join(pathSeparator),
+ };
+}
+
+/**
+ * Returns the Claude CLI command path and an environment with PATH updated to include the CLI directory.
+ *
+ * WARNING: This function uses synchronous subprocess calls that block the main process.
+ * For use in Electron main process, prefer getClaudeCliInvocationAsync() instead.
+ */
+export function getClaudeCliInvocation(): ClaudeCliInvocation {
+ const command = getToolPath('claude');
+ const env = getAugmentedEnv();
+
+ return {
+ command,
+ env: ensureCommandDirInPath(command, env),
+ };
+}
+
+/**
+ * Returns the Claude CLI command path and environment asynchronously (non-blocking).
+ *
+ * Safe to call from Electron main process without blocking the event loop.
+ * Uses cached values if available for instant response.
+ *
+ * @example
+ * ```typescript
+ * const { command, env } = await getClaudeCliInvocationAsync();
+ * spawn(command, ['--version'], { env });
+ * ```
+ */
+export async function getClaudeCliInvocationAsync(): Promise<ClaudeCliInvocation> {
+ // Run both detections in parallel for efficiency
+ const [command, env] = await Promise.all([
+ getToolPathAsync('claude'),
+ getAugmentedEnvAsync(),
+ ]);
+
+ return {
+ command,
+ env: ensureCommandDirInPath(command, env),
+ };
+}
diff --git a/apps/frontend/src/main/claude-profile-manager.ts b/apps/frontend/src/main/claude-profile-manager.ts
index 0f9c88f6d6..f64ef42d81 100644
--- a/apps/frontend/src/main/claude-profile-manager.ts
+++ b/apps/frontend/src/main/claude-profile-manager.ts
@@ -13,7 +13,7 @@
import { app } from 'electron';
import { join } from 'path';
-import { existsSync, mkdirSync } from 'fs';
+import { mkdir } from 'fs/promises';
import type {
ClaudeProfile,
ClaudeProfileSettings,
@@ -32,6 +32,7 @@ import {
} from './claude-profile/rate-limit-manager';
import {
loadProfileStore,
+ loadProfileStoreAsync,
saveProfileStore,
ProfileStoreData,
DEFAULT_AUTO_SWITCH_SETTINGS
@@ -57,19 +58,45 @@ import {
*/
export class ClaudeProfileManager {
private storePath: string;
+ private configDir: string;
private data: ProfileStoreData;
+ private initialized: boolean = false;
constructor() {
- const configDir = join(app.getPath('userData'), 'config');
- this.storePath = join(configDir, 'claude-profiles.json');
+ this.configDir = join(app.getPath('userData'), 'config');
+ this.storePath = join(this.configDir, 'claude-profiles.json');
- // Ensure directory exists
- if (!existsSync(configDir)) {
- mkdirSync(configDir, { recursive: true });
+ // DON'T do file I/O here - defer to async initialize()
+ // Start with default data until initialized
+ this.data = this.createDefaultData();
+ }
+
+ /**
+ * Initialize the profile manager asynchronously (non-blocking)
+ * This should be called at app startup via initializeClaudeProfileManager()
+ */
+ async initialize(): Promise<void> {
+ if (this.initialized) return;
+
+ // Ensure directory exists (async) - mkdir with recursive:true is idempotent
+ await mkdir(this.configDir, { recursive: true });
+
+ // Load existing data asynchronously
+ const loadedData = await loadProfileStoreAsync(this.storePath);
+ if (loadedData) {
+ this.data = loadedData;
}
+ // else: keep the default data from constructor
+
+ this.initialized = true;
+ console.warn('[ClaudeProfileManager] Initialized asynchronously');
+ }
- // Load existing data or initialize with default profile
- this.data = this.load();
+ /**
+ * Check if the profile manager has been initialized
+ */
+ isInitialized(): boolean {
+ return this.initialized;
}
/**
@@ -522,11 +549,13 @@ export class ClaudeProfileManager {
}
}
-// Singleton instance
+// Singleton instance and initialization promise
let profileManager: ClaudeProfileManager | null = null;
+let initPromise: Promise<ClaudeProfileManager> | null = null;
/**
* Get the singleton Claude profile manager instance
+ * Note: For async contexts, prefer initializeClaudeProfileManager() to ensure initialization
*/
export function getClaudeProfileManager(): ClaudeProfileManager {
if (!profileManager) {
@@ -534,3 +563,28 @@ export function getClaudeProfileManager(): ClaudeProfileManager {
}
return profileManager;
}
+
+/**
+ * Initialize and get the singleton Claude profile manager instance (async)
+ * This ensures the profile manager is fully initialized before use.
+ * Uses promise caching to prevent concurrent initialization.
+ */
+export async function initializeClaudeProfileManager(): Promise<ClaudeProfileManager> {
+ if (!profileManager) {
+ profileManager = new ClaudeProfileManager();
+ }
+
+ // If already initialized, return immediately
+ if (profileManager.isInitialized()) {
+ return profileManager;
+ }
+
+ // If initialization is in progress, wait for it (promise caching)
+ if (!initPromise) {
+ initPromise = profileManager.initialize().then(() => {
+ return profileManager!;
+ });
+ }
+
+ return initPromise;
+}
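// Sketch of the intended startup wiring for the async initialization added above
// (the whenReady placement and import path are assumptions about the caller, not
// part of this file):
import { app } from 'electron';
import { initializeClaudeProfileManager } from './claude-profile-manager';

app.whenReady().then(async () => {
  // Awaiting here guarantees profiles are loaded from disk before any spawn path
  // calls getClaudeProfileManager(); repeat calls reuse the cached init promise.
  await initializeClaudeProfileManager();
});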
diff --git a/apps/frontend/src/main/claude-profile/profile-storage.ts b/apps/frontend/src/main/claude-profile/profile-storage.ts
index bd5b89c372..a4c825e2f2 100644
--- a/apps/frontend/src/main/claude-profile/profile-storage.ts
+++ b/apps/frontend/src/main/claude-profile/profile-storage.ts
@@ -4,6 +4,7 @@
*/
import { existsSync, readFileSync, writeFileSync } from 'fs';
+import { readFile } from 'fs/promises';
import type { ClaudeProfile, ClaudeAutoSwitchSettings } from '../../shared/types';
export const STORE_VERSION = 3; // Bumped for encrypted token storage
@@ -30,6 +31,42 @@ export interface ProfileStoreData {
autoSwitch?: ClaudeAutoSwitchSettings;
}
+/**
+ * Parse and migrate profile data from JSON.
+ * Handles version migration and date parsing.
+ * Shared helper used by both sync and async loaders.
+ */
+function parseAndMigrateProfileData(data: Record<string, unknown>): ProfileStoreData | null {
+ // Handle version migration
+ if (data.version === 1) {
+ // Migrate v1 to v2: add usage and rateLimitEvents fields
+ data.version = STORE_VERSION;
+ data.autoSwitch = DEFAULT_AUTO_SWITCH_SETTINGS;
+ }
+
+ if (data.version === STORE_VERSION) {
+ // Parse dates
+ const profiles = data.profiles as ClaudeProfile[];
+ data.profiles = profiles.map((p: ClaudeProfile) => ({
+ ...p,
+ createdAt: new Date(p.createdAt),
+ lastUsedAt: p.lastUsedAt ? new Date(p.lastUsedAt) : undefined,
+ usage: p.usage ? {
+ ...p.usage,
+ lastUpdated: new Date(p.usage.lastUpdated)
+ } : undefined,
+ rateLimitEvents: p.rateLimitEvents?.map(e => ({
+ ...e,
+ hitAt: new Date(e.hitAt),
+ resetAt: new Date(e.resetAt)
+ }))
+ }));
+ return data as unknown as ProfileStoreData;
+ }
+
+ return null;
+}
+
/**
* Load profiles from disk
*/
@@ -38,32 +75,7 @@ export function loadProfileStore(storePath: string): ProfileStoreData | null {
if (existsSync(storePath)) {
const content = readFileSync(storePath, 'utf-8');
const data = JSON.parse(content);
-
- // Handle version migration
- if (data.version === 1) {
- // Migrate v1 to v2: add usage and rateLimitEvents fields
- data.version = STORE_VERSION;
- data.autoSwitch = DEFAULT_AUTO_SWITCH_SETTINGS;
- }
-
- if (data.version === STORE_VERSION) {
- // Parse dates
- data.profiles = data.profiles.map((p: ClaudeProfile) => ({
- ...p,
- createdAt: new Date(p.createdAt),
- lastUsedAt: p.lastUsedAt ? new Date(p.lastUsedAt) : undefined,
- usage: p.usage ? {
- ...p.usage,
- lastUpdated: new Date(p.usage.lastUpdated)
- } : undefined,
- rateLimitEvents: p.rateLimitEvents?.map(e => ({
- ...e,
- hitAt: new Date(e.hitAt),
- resetAt: new Date(e.resetAt)
- }))
- }));
- return data;
- }
+ return parseAndMigrateProfileData(data);
}
} catch (error) {
console.error('[ProfileStorage] Error loading profiles:', error);
@@ -72,6 +84,27 @@ export function loadProfileStore(storePath: string): ProfileStoreData | null {
return null;
}
+/**
+ * Load profiles from disk (async, non-blocking)
+ * Use this version for initialization to avoid blocking the main process.
+ */
+export async function loadProfileStoreAsync(storePath: string): Promise<ProfileStoreData | null> {
+ try {
+ // Read file directly - avoid TOCTOU race condition by not checking existence first
+ // If file doesn't exist, readFile will throw ENOENT which we handle below
+ const content = await readFile(storePath, 'utf-8');
+ const data = JSON.parse(content);
+ return parseAndMigrateProfileData(data);
+ } catch (error) {
+ // ENOENT is expected if file doesn't exist yet
+ if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
+ console.error('[ProfileStorage] Error loading profiles:', error);
+ }
+ }
+
+ return null;
+}
+
/**
* Save profiles to disk
*/
diff --git a/apps/frontend/src/main/claude-profile/profile-utils.ts b/apps/frontend/src/main/claude-profile/profile-utils.ts
index 557d8fae0e..80a3c048cb 100644
--- a/apps/frontend/src/main/claude-profile/profile-utils.ts
+++ b/apps/frontend/src/main/claude-profile/profile-utils.ts
@@ -56,7 +56,7 @@ export async function createProfileDirectory(profileName: string): Promise {
+ try {
+ await fsPromises.access(filePath);
+ return true;
+ } catch {
+ return false;
+ }
+}
import type { ToolDetectionResult } from '../shared/types';
import { findHomebrewPython as findHomebrewPythonUtil } from './utils/homebrew-python';
+import {
+ getWindowsExecutablePaths,
+ getWindowsExecutablePathsAsync,
+ WINDOWS_GIT_PATHS,
+ findWindowsExecutableViaWhere,
+ findWindowsExecutableViaWhereAsync,
+} from './utils/windows-paths';
/**
* Supported CLI tools managed by this system
@@ -103,6 +130,139 @@ function isWrongPlatformPath(pathStr: string | undefined): boolean {
return false;
}
+// ============================================================================
+// SHARED HELPERS - Used by both sync and async Claude detection
+// ============================================================================
+
+/**
+ * Configuration for Claude CLI detection paths
+ */
+interface ClaudeDetectionPaths {
+ /** Homebrew paths for macOS (Apple Silicon and Intel) */
+ homebrewPaths: string[];
+ /** Platform-specific standard installation paths */
+ platformPaths: string[];
+ /** Path to NVM versions directory for Node.js-installed Claude */
+ nvmVersionsDir: string;
+}
+
+/**
+ * Get all candidate paths for Claude CLI detection.
+ *
+ * Returns platform-specific paths where Claude CLI might be installed.
+ * This pure function consolidates path configuration used by both sync
+ * and async detection methods.
+ *
+ * @param homeDir - User's home directory (from os.homedir())
+ * @returns Object containing homebrew, platform, and NVM paths
+ *
+ * @example
+ * const paths = getClaudeDetectionPaths('/Users/john');
+ * // On macOS: { homebrewPaths: ['/opt/homebrew/bin/claude', ...], ... }
+ */
+export function getClaudeDetectionPaths(homeDir: string): ClaudeDetectionPaths {
+ const homebrewPaths = [
+ '/opt/homebrew/bin/claude', // Apple Silicon
+ '/usr/local/bin/claude', // Intel Mac
+ ];
+
+ const platformPaths = process.platform === 'win32'
+ ? [
+ path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'),
+ path.join(homeDir, 'AppData', 'Roaming', 'npm', 'claude.cmd'),
+ path.join(homeDir, '.local', 'bin', 'claude.exe'),
+ 'C:\\Program Files\\Claude\\claude.exe',
+ 'C:\\Program Files (x86)\\Claude\\claude.exe',
+ ]
+ : [
+ path.join(homeDir, '.local', 'bin', 'claude'),
+ path.join(homeDir, 'bin', 'claude'),
+ ];
+
+ const nvmVersionsDir = path.join(homeDir, '.nvm', 'versions', 'node');
+
+ return { homebrewPaths, platformPaths, nvmVersionsDir };
+}
+
+/**
+ * Sort NVM version directories by semantic version (newest first).
+ *
+ * Filters entries to only include directories starting with 'v' (version directories)
+ * and sorts them in descending order so the newest Node.js version is checked first.
+ *
+ * @param entries - Directory entries from readdir with { name, isDirectory() }
+ * @returns Array of version directory names sorted newest first
+ *
+ * @example
+ * const entries = [
+ * { name: 'v18.0.0', isDirectory: () => true },
+ * { name: 'v20.0.0', isDirectory: () => true },
+ * { name: '.DS_Store', isDirectory: () => false },
+ * ];
+ * sortNvmVersionDirs(entries); // ['v20.0.0', 'v18.0.0']
+ */
+export function sortNvmVersionDirs(
+ entries: Array<{ name: string; isDirectory(): boolean }>
+): string[] {
+ // Regex to match valid semver directories: v20.0.0, v18.17.1, etc.
+ // This prevents NaN from malformed versions (e.g., v20.abc.1) breaking sort
+ const semverRegex = /^v\d+\.\d+\.\d+$/;
+
+ return entries
+ .filter((entry) => entry.isDirectory() && semverRegex.test(entry.name))
+ .sort((a, b) => {
+ // Parse version numbers: v20.0.0 -> [20, 0, 0]
+ const vA = a.name.slice(1).split('.').map(Number);
+ const vB = b.name.slice(1).split('.').map(Number);
+ // Compare major, minor, patch in order (descending)
+ for (let i = 0; i < 3; i++) {
+ const diff = (vB[i] ?? 0) - (vA[i] ?? 0);
+ if (diff !== 0) return diff;
+ }
+ return 0;
+ })
+ .map((entry) => entry.name);
+}
+
+/**
+ * Build a ToolDetectionResult from a validation result.
+ *
+ * Returns null if validation failed, otherwise constructs the full result object.
+ * This helper consolidates the result-building logic used throughout detection.
+ *
+ * @param claudePath - The path that was validated
+ * @param validation - The validation result from validateClaude/validateClaudeAsync
+ * @param source - The source of detection ('user-config', 'homebrew', 'system-path', 'nvm')
+ * @param messagePrefix - Prefix for the success message (e.g., 'Using Homebrew Claude CLI')
+ * @returns ToolDetectionResult if valid, null if validation failed
+ *
+ * @example
+ * const result = buildClaudeDetectionResult(
+ * '/opt/homebrew/bin/claude',
+ * { valid: true, version: '1.0.0', message: 'OK' },
+ * 'homebrew',
+ * 'Using Homebrew Claude CLI'
+ * );
+ * // Returns: { found: true, path: '/opt/homebrew/bin/claude', version: '1.0.0', ... }
+ */
+export function buildClaudeDetectionResult(
+ claudePath: string,
+ validation: ToolValidation,
+ source: ToolDetectionResult['source'],
+ messagePrefix: string
+): ToolDetectionResult | null {
+ if (!validation.valid) {
+ return null;
+ }
+ return {
+ found: true,
+ path: claudePath,
+ version: validation.version,
+ source,
+ message: `${messagePrefix}: ${claudePath}`,
+ };
+}
+
/**
* Centralized CLI Tool Manager
*
@@ -392,7 +552,40 @@ class CLIToolManager {
}
}
- // 4. Not found - fallback to 'git'
+ // 4. Windows-specific detection using 'where' command (most reliable for custom installs)
+ if (process.platform === 'win32') {
+ // First try 'where' command - finds git regardless of installation location
+ const whereGitPath = findWindowsExecutableViaWhere('git', '[Git]');
+ if (whereGitPath) {
+ const validation = this.validateGit(whereGitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: whereGitPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using Windows Git: ${whereGitPath}`,
+ };
+ }
+ }
+
+ // Fallback to checking common installation paths
+ const windowsPaths = getWindowsExecutablePaths(WINDOWS_GIT_PATHS, '[Git]');
+ for (const winGitPath of windowsPaths) {
+ const validation = this.validateGit(winGitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: winGitPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using Windows Git: ${winGitPath}`,
+ };
+ }
+ }
+ }
+
+ // 5. Not found - fallback to 'git'
return {
found: false,
source: 'fallback',
@@ -517,99 +710,75 @@ class CLIToolManager {
* @returns Detection result for Claude CLI
*/
private detectClaude(): ToolDetectionResult {
+ const homeDir = os.homedir();
+ const paths = getClaudeDetectionPaths(homeDir);
+
// 1. User configuration
if (this.userConfig.claudePath) {
- // Check if path is from wrong platform (e.g., Windows path on macOS)
if (isWrongPlatformPath(this.userConfig.claudePath)) {
console.warn(
`[Claude CLI] User-configured path is from different platform, ignoring: ${this.userConfig.claudePath}`
);
} else {
const validation = this.validateClaude(this.userConfig.claudePath);
- if (validation.valid) {
- return {
- found: true,
- path: this.userConfig.claudePath,
- version: validation.version,
- source: 'user-config',
- message: `Using user-configured Claude CLI: ${this.userConfig.claudePath}`,
- };
- }
- console.warn(
- `[Claude CLI] User-configured path invalid: ${validation.message}`
+ const result = buildClaudeDetectionResult(
+ this.userConfig.claudePath, validation, 'user-config', 'Using user-configured Claude CLI'
);
+ if (result) return result;
+ console.warn(`[Claude CLI] User-configured path invalid: ${validation.message}`);
}
}
// 2. Homebrew (macOS)
if (process.platform === 'darwin') {
- const homebrewPaths = [
- '/opt/homebrew/bin/claude', // Apple Silicon
- '/usr/local/bin/claude', // Intel Mac
- ];
-
- for (const claudePath of homebrewPaths) {
+ for (const claudePath of paths.homebrewPaths) {
if (existsSync(claudePath)) {
const validation = this.validateClaude(claudePath);
- if (validation.valid) {
- return {
- found: true,
- path: claudePath,
- version: validation.version,
- source: 'homebrew',
- message: `Using Homebrew Claude CLI: ${claudePath}`,
- };
- }
+ const result = buildClaudeDetectionResult(claudePath, validation, 'homebrew', 'Using Homebrew Claude CLI');
+ if (result) return result;
}
}
}
// 3. System PATH (augmented)
- const claudePath = findExecutable('claude');
- if (claudePath) {
- const validation = this.validateClaude(claudePath);
- if (validation.valid) {
- return {
- found: true,
- path: claudePath,
- version: validation.version,
- source: 'system-path',
- message: `Using system Claude CLI: ${claudePath}`,
- };
+ const systemClaudePath = findExecutable('claude');
+ if (systemClaudePath) {
+ const validation = this.validateClaude(systemClaudePath);
+ const result = buildClaudeDetectionResult(systemClaudePath, validation, 'system-path', 'Using system Claude CLI');
+ if (result) return result;
+ }
+
+ // 4. NVM paths (Unix only) - check before platform paths for better Node.js integration
+ if (process.platform !== 'win32') {
+ try {
+ if (existsSync(paths.nvmVersionsDir)) {
+ const nodeVersions = readdirSync(paths.nvmVersionsDir, { withFileTypes: true });
+ const versionNames = sortNvmVersionDirs(nodeVersions);
+
+ for (const versionName of versionNames) {
+ const nvmClaudePath = path.join(paths.nvmVersionsDir, versionName, 'bin', 'claude');
+ if (existsSync(nvmClaudePath)) {
+ const validation = this.validateClaude(nvmClaudePath);
+ const result = buildClaudeDetectionResult(nvmClaudePath, validation, 'nvm', 'Using NVM Claude CLI');
+ if (result) return result;
+ }
+ }
+ }
+ } catch (error) {
+ console.warn(`[Claude CLI] Unable to read NVM directory: ${error}`);
}
}
- // 4. Platform-specific standard locations
- const homeDir = os.homedir();
- const platformPaths = process.platform === 'win32'
- ? [
- path.join(homeDir, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'),
- path.join(homeDir, 'AppData', 'Roaming', 'npm', 'claude.cmd'),
- path.join(homeDir, '.local', 'bin', 'claude.exe'),
- 'C:\\Program Files\\Claude\\claude.exe',
- 'C:\\Program Files (x86)\\Claude\\claude.exe',
- ]
- : [
- path.join(homeDir, '.local', 'bin', 'claude'),
- path.join(homeDir, 'bin', 'claude'),
- ];
-
- for (const claudePath of platformPaths) {
+ // 5. Platform-specific standard locations
+ for (const claudePath of paths.platformPaths) {
if (existsSync(claudePath)) {
const validation = this.validateClaude(claudePath);
- if (validation.valid) {
- return {
- found: true,
- path: claudePath,
- version: validation.version,
- source: 'system-path',
- message: `Using Claude CLI: ${claudePath}`,
- };
- }
+ const result = buildClaudeDetectionResult(claudePath, validation, 'system-path', 'Using Claude CLI');
+ if (result) return result;
}
}
- // 5. Not found
+ // 6. Not found
return {
found: false,
source: 'fallback',
@@ -759,6 +928,7 @@ class CLIToolManager {
timeout: 5000,
windowsHide: true,
shell: needsShell,
+ env: getAugmentedEnv(),
}).trim();
// Claude CLI version output format: "claude-code version X.Y.Z" or similar
@@ -778,116 +948,747 @@ class CLIToolManager {
}
}
+ // ============================================================================
+ // ASYNC METHODS - Non-blocking alternatives for Electron main process
+ // ============================================================================
+
/**
- * Get bundled Python path for packaged apps
+ * Get the path for a CLI tool asynchronously (non-blocking)
*
- * Only available in packaged Electron apps where Python is bundled
- * in the resources directory.
+ * Uses cached path if available, otherwise detects asynchronously.
+ * Safe to call from Electron main process without blocking.
*
- * @returns Path to bundled Python or null if not found
+ * @param tool - The CLI tool to get the path for
+ * @returns Promise resolving to the tool path
*/
- private getBundledPythonPath(): string | null {
- if (!app.isPackaged) {
- return null;
+ async getToolPathAsync(tool: CLITool): Promise<string> {
+ // Check cache first (instant return if cached)
+ const cached = this.cache.get(tool);
+ if (cached) {
+ console.warn(
+ `[CLI Tools] Using cached ${tool}: ${cached.path} (${cached.source})`
+ );
+ return cached.path;
}
- const resourcesPath = process.resourcesPath;
- const isWindows = process.platform === 'win32';
-
- const pythonPath = isWindows
- ? path.join(resourcesPath, 'python', 'python.exe')
- : path.join(resourcesPath, 'python', 'bin', 'python3');
+ // Detect asynchronously
+ const result = await this.detectToolPathAsync(tool);
+ if (result.found && result.path) {
+ this.cache.set(tool, {
+ path: result.path,
+ version: result.version,
+ source: result.source,
+ });
+ console.warn(`[CLI Tools] Detected ${tool}: ${result.path} (${result.source})`);
+ return result.path;
+ }
- return existsSync(pythonPath) ? pythonPath : null;
+ // Fallback to tool name (let system PATH resolve it)
+ console.warn(`[CLI Tools] ${tool} not found, using fallback: "${tool}"`);
+ return tool;
}
/**
- * Find Homebrew Python on macOS
- * Delegates to shared utility function.
+ * Detect tool path asynchronously
*
- * @returns Path to Homebrew Python or null if not found
+ * All tools now use async detection methods to prevent blocking the main process.
+ *
+ * @param tool - The tool to detect
+ * @returns Promise resolving to detection result
*/
- private findHomebrewPython(): string | null {
- return findHomebrewPythonUtil(
- (pythonPath) => this.validatePython(pythonPath),
- '[CLI Tools]'
- );
+ private async detectToolPathAsync(tool: CLITool): Promise<ToolDetectionResult> {
+ switch (tool) {
+ case 'claude':
+ return this.detectClaudeAsync();
+ case 'python':
+ return this.detectPythonAsync();
+ case 'git':
+ return this.detectGitAsync();
+ case 'gh':
+ return this.detectGitHubCLIAsync();
+ default:
+ return {
+ found: false,
+ source: 'fallback',
+ message: `Unknown tool: ${tool}`,
+ };
+ }
}
/**
- * Clear cache manually
+ * Validate Claude CLI asynchronously (non-blocking)
*
- * Useful for testing or forcing re-detection.
- * Normally not needed as cache is cleared automatically on settings change.
+ * @param claudeCmd - The Claude CLI command to validate
+ * @returns Promise resolving to validation result
*/
- clearCache(): void {
- this.cache.clear();
- console.warn('[CLI Tools] Cache cleared');
+ private async validateClaudeAsync(claudeCmd: string): Promise<ToolValidation> {
+ try {
+ const needsShell = process.platform === 'win32' &&
+ (claudeCmd.endsWith('.cmd') || claudeCmd.endsWith('.bat'));
+
+ const { stdout } = await execFileAsync(claudeCmd, ['--version'], {
+ encoding: 'utf-8',
+ timeout: 5000,
+ windowsHide: true,
+ shell: needsShell,
+ env: await getAugmentedEnvAsync(),
+ });
+
+ const version = stdout.trim();
+ const match = version.match(/(\d+\.\d+\.\d+)/);
+ const versionStr = match ? match[1] : version.split('\n')[0];
+
+ return {
+ valid: true,
+ version: versionStr,
+ message: `Claude CLI ${versionStr} is available`,
+ };
+ } catch (error) {
+ return {
+ valid: false,
+ message: `Failed to validate Claude CLI: ${error instanceof Error ? error.message : String(error)}`,
+ };
+ }
}
/**
- * Get tool detection info for diagnostics
- *
- * Performs fresh detection without using cache.
- * Useful for Settings UI to show current detection status.
+ * Validate Python version asynchronously (non-blocking)
*
- * @param tool - The tool to get detection info for
- * @returns Detection result with full metadata
+ * @param pythonCmd - The Python command to validate
+ * @returns Promise resolving to validation result
*/
- getToolInfo(tool: CLITool): ToolDetectionResult {
- return this.detectToolPath(tool);
- }
-}
+  private async validatePythonAsync(pythonCmd: string): Promise<{ valid: boolean; version?: string; message: string }> {
+ const MINIMUM_VERSION = '3.10.0';
-// Singleton instance
-const cliToolManager = new CLIToolManager();
+ try {
+ const parts = pythonCmd.split(' ');
+ const cmd = parts[0];
+ const args = [...parts.slice(1), '--version'];
-/**
- * Get the path for a CLI tool
- *
- * Convenience function for accessing the tool manager singleton.
- * Uses cached path if available, otherwise auto-detects.
- *
- * @param tool - The CLI tool to get the path for
- * @returns The resolved path to the tool executable
- *
- * @example
- * ```typescript
- * import { getToolPath } from './cli-tool-manager';
- *
- * const pythonPath = getToolPath('python');
- * const gitPath = getToolPath('git');
- * const ghPath = getToolPath('gh');
- *
- * execSync(`${gitPath} status`, { cwd: projectPath });
- * ```
- */
-export function getToolPath(tool: CLITool): string {
- return cliToolManager.getToolPath(tool);
-}
+ const { stdout } = await execFileAsync(cmd, args, {
+ encoding: 'utf-8',
+ timeout: 5000,
+ windowsHide: true,
+ env: await getAugmentedEnvAsync(),
+ });
-/**
- * Configure CLI tools with user settings
- *
- * Call this when user updates CLI tool paths in Settings.
- * Clears cache to force re-detection with new configuration.
- *
- * @param config - User configuration for CLI tool paths
- *
- * @example
- * ```typescript
- * import { configureTools } from './cli-tool-manager';
- *
- * // When settings are loaded or updated
- * configureTools({
- * pythonPath: settings.pythonPath,
- * gitPath: settings.gitPath,
- * githubCLIPath: settings.githubCLIPath,
- * });
- * ```
- */
-export function configureTools(config: ToolConfig): void {
- cliToolManager.configure(config);
+ const version = stdout.trim();
+ const match = version.match(/Python (\d+\.\d+\.\d+)/);
+ if (!match) {
+ return {
+ valid: false,
+ message: 'Unable to detect Python version',
+ };
+ }
+
+ const versionStr = match[1];
+ const [major, minor] = versionStr.split('.').map(Number);
+ const [reqMajor, reqMinor] = MINIMUM_VERSION.split('.').map(Number);
+
+ const meetsRequirement =
+ major > reqMajor || (major === reqMajor && minor >= reqMinor);
+
+ if (!meetsRequirement) {
+ return {
+ valid: false,
+ version: versionStr,
+ message: `Python ${versionStr} is too old. Requires ${MINIMUM_VERSION}+`,
+ };
+ }
+
+ return {
+ valid: true,
+ version: versionStr,
+ message: `Python ${versionStr} meets requirements`,
+ };
+ } catch (error) {
+ return {
+ valid: false,
+ message: `Failed to validate Python: ${error}`,
+ };
+ }
+ }
+
+ /**
+ * Validate Git asynchronously (non-blocking)
+ *
+ * @param gitCmd - The Git command to validate
+ * @returns Promise resolving to validation result
+ */
+  private async validateGitAsync(gitCmd: string): Promise<{ valid: boolean; version?: string; message: string }> {
+ try {
+ const { stdout } = await execFileAsync(gitCmd, ['--version'], {
+ encoding: 'utf-8',
+ timeout: 5000,
+ windowsHide: true,
+ env: await getAugmentedEnvAsync(),
+ });
+
+ const version = stdout.trim();
+ const match = version.match(/git version (\d+\.\d+\.\d+)/);
+ const versionStr = match ? match[1] : version;
+
+ return {
+ valid: true,
+ version: versionStr,
+ message: `Git ${versionStr} is available`,
+ };
+ } catch (error) {
+ return {
+ valid: false,
+ message: `Failed to validate Git: ${error instanceof Error ? error.message : String(error)}`,
+ };
+ }
+ }
+
+ /**
+ * Validate GitHub CLI asynchronously (non-blocking)
+ *
+ * @param ghCmd - The GitHub CLI command to validate
+ * @returns Promise resolving to validation result
+ */
+  private async validateGitHubCLIAsync(ghCmd: string): Promise<{ valid: boolean; version?: string; message: string }> {
+ try {
+ const { stdout } = await execFileAsync(ghCmd, ['--version'], {
+ encoding: 'utf-8',
+ timeout: 5000,
+ windowsHide: true,
+ env: await getAugmentedEnvAsync(),
+ });
+
+ const version = stdout.trim();
+ const match = version.match(/gh version (\d+\.\d+\.\d+)/);
+ const versionStr = match ? match[1] : version.split('\n')[0];
+
+ return {
+ valid: true,
+ version: versionStr,
+ message: `GitHub CLI ${versionStr} is available`,
+ };
+ } catch (error) {
+ return {
+ valid: false,
+ message: `Failed to validate GitHub CLI: ${error instanceof Error ? error.message : String(error)}`,
+ };
+ }
+ }
+
+ /**
+ * Detect Claude CLI asynchronously (non-blocking)
+ *
+ * Same detection logic as detectClaude but uses async validation.
+ *
+ * @returns Promise resolving to detection result
+ */
+  private async detectClaudeAsync(): Promise<ToolDetectionResult> {
+ const homeDir = os.homedir();
+ const paths = getClaudeDetectionPaths(homeDir);
+
+ // 1. User configuration
+ if (this.userConfig.claudePath) {
+ if (isWrongPlatformPath(this.userConfig.claudePath)) {
+ console.warn(
+ `[Claude CLI] User-configured path is from different platform, ignoring: ${this.userConfig.claudePath}`
+ );
+ } else {
+ const validation = await this.validateClaudeAsync(this.userConfig.claudePath);
+ const result = buildClaudeDetectionResult(
+ this.userConfig.claudePath, validation, 'user-config', 'Using user-configured Claude CLI'
+ );
+ if (result) return result;
+ console.warn(`[Claude CLI] User-configured path invalid: ${validation.message}`);
+ }
+ }
+
+ // 2. Homebrew (macOS)
+ if (process.platform === 'darwin') {
+ for (const claudePath of paths.homebrewPaths) {
+ if (await existsAsync(claudePath)) {
+ const validation = await this.validateClaudeAsync(claudePath);
+ const result = buildClaudeDetectionResult(claudePath, validation, 'homebrew', 'Using Homebrew Claude CLI');
+ if (result) return result;
+ }
+ }
+ }
+
+ // 3. System PATH (augmented) - using async findExecutable
+ const systemClaudePath = await findExecutableAsync('claude');
+ if (systemClaudePath) {
+ const validation = await this.validateClaudeAsync(systemClaudePath);
+ const result = buildClaudeDetectionResult(systemClaudePath, validation, 'system-path', 'Using system Claude CLI');
+ if (result) return result;
+ }
+
+ // 4. NVM paths (Unix only) - check before platform paths for better Node.js integration
+ if (process.platform !== 'win32') {
+ try {
+ if (await existsAsync(paths.nvmVersionsDir)) {
+ const nodeVersions = await fsPromises.readdir(paths.nvmVersionsDir, { withFileTypes: true });
+ const versionNames = sortNvmVersionDirs(nodeVersions);
+
+ for (const versionName of versionNames) {
+ const nvmClaudePath = path.join(paths.nvmVersionsDir, versionName, 'bin', 'claude');
+ if (await existsAsync(nvmClaudePath)) {
+ const validation = await this.validateClaudeAsync(nvmClaudePath);
+ const result = buildClaudeDetectionResult(nvmClaudePath, validation, 'nvm', 'Using NVM Claude CLI');
+ if (result) return result;
+ }
+ }
+ }
+ } catch (error) {
+ console.warn(`[Claude CLI] Unable to read NVM directory: ${error}`);
+ }
+ }
+
+ // 5. Platform-specific standard locations
+ for (const claudePath of paths.platformPaths) {
+ if (await existsAsync(claudePath)) {
+ const validation = await this.validateClaudeAsync(claudePath);
+ const result = buildClaudeDetectionResult(claudePath, validation, 'system-path', 'Using Claude CLI');
+ if (result) return result;
+ }
+ }
+
+ // 6. Not found
+ return {
+ found: false,
+ source: 'fallback',
+ message: 'Claude CLI not found. Install from https://claude.ai/download',
+ };
+ }
+
+ /**
+ * Detect Python asynchronously (non-blocking)
+ *
+ * Same detection logic as detectPython but uses async validation.
+ *
+ * @returns Promise resolving to detection result
+ */
+  private async detectPythonAsync(): Promise<ToolDetectionResult> {
+ const MINIMUM_VERSION = '3.10.0';
+
+ // 1. User configuration
+ if (this.userConfig.pythonPath) {
+ if (isWrongPlatformPath(this.userConfig.pythonPath)) {
+ console.warn(
+ `[Python] User-configured path is from different platform, ignoring: ${this.userConfig.pythonPath}`
+ );
+ } else {
+ const validation = await this.validatePythonAsync(this.userConfig.pythonPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: this.userConfig.pythonPath,
+ version: validation.version,
+ source: 'user-config',
+ message: `Using user-configured Python: ${this.userConfig.pythonPath}`,
+ };
+ }
+ console.warn(`[Python] User-configured path invalid: ${validation.message}`);
+ }
+ }
+
+ // 2. Bundled Python (packaged apps only)
+ if (app.isPackaged) {
+ const bundledPath = this.getBundledPythonPath();
+ if (bundledPath) {
+ const validation = await this.validatePythonAsync(bundledPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: bundledPath,
+ version: validation.version,
+ source: 'bundled',
+ message: `Using bundled Python: ${bundledPath}`,
+ };
+ }
+ }
+ }
+
+ // 3. Homebrew Python (macOS) - simplified async version
+ if (process.platform === 'darwin') {
+ const homebrewPaths = [
+ '/opt/homebrew/bin/python3',
+ '/opt/homebrew/bin/python3.12',
+ '/opt/homebrew/bin/python3.11',
+ '/opt/homebrew/bin/python3.10',
+ '/usr/local/bin/python3',
+ ];
+ for (const pythonPath of homebrewPaths) {
+ if (await existsAsync(pythonPath)) {
+ const validation = await this.validatePythonAsync(pythonPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: pythonPath,
+ version: validation.version,
+ source: 'homebrew',
+ message: `Using Homebrew Python: ${pythonPath}`,
+ };
+ }
+ }
+ }
+ }
+
+ // 4. System PATH (augmented)
+ const candidates =
+ process.platform === 'win32'
+ ? ['py -3', 'python', 'python3', 'py']
+ : ['python3', 'python'];
+
+ for (const cmd of candidates) {
+ if (cmd.startsWith('py ')) {
+ const validation = await this.validatePythonAsync(cmd);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: cmd,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using system Python: ${cmd}`,
+ };
+ }
+ } else {
+ const pythonPath = await findExecutableAsync(cmd);
+ if (pythonPath) {
+ const validation = await this.validatePythonAsync(pythonPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: pythonPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using system Python: ${pythonPath}`,
+ };
+ }
+ }
+ }
+ }
+
+ // 5. Not found
+ return {
+ found: false,
+ source: 'fallback',
+ message:
+ `Python ${MINIMUM_VERSION}+ not found. ` +
+ 'Please install Python or configure in Settings.',
+ };
+ }
+
+ /**
+ * Detect Git asynchronously (non-blocking)
+ *
+ * Same detection logic as detectGit but uses async validation.
+ *
+ * @returns Promise resolving to detection result
+ */
+  private async detectGitAsync(): Promise<ToolDetectionResult> {
+ // 1. User configuration
+ if (this.userConfig.gitPath) {
+ if (isWrongPlatformPath(this.userConfig.gitPath)) {
+ console.warn(
+ `[Git] User-configured path is from different platform, ignoring: ${this.userConfig.gitPath}`
+ );
+ } else {
+ const validation = await this.validateGitAsync(this.userConfig.gitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: this.userConfig.gitPath,
+ version: validation.version,
+ source: 'user-config',
+ message: `Using user-configured Git: ${this.userConfig.gitPath}`,
+ };
+ }
+ console.warn(`[Git] User-configured path invalid: ${validation.message}`);
+ }
+ }
+
+ // 2. Homebrew (macOS)
+ if (process.platform === 'darwin') {
+ const homebrewPaths = [
+ '/opt/homebrew/bin/git',
+ '/usr/local/bin/git',
+ ];
+
+ for (const gitPath of homebrewPaths) {
+ if (await existsAsync(gitPath)) {
+ const validation = await this.validateGitAsync(gitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: gitPath,
+ version: validation.version,
+ source: 'homebrew',
+ message: `Using Homebrew Git: ${gitPath}`,
+ };
+ }
+ }
+ }
+ }
+
+ // 3. System PATH (augmented)
+ const gitPath = await findExecutableAsync('git');
+ if (gitPath) {
+ const validation = await this.validateGitAsync(gitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: gitPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using system Git: ${gitPath}`,
+ };
+ }
+ }
+
+ // 4. Windows-specific detection (async to avoid blocking main process)
+ if (process.platform === 'win32') {
+ const whereGitPath = await findWindowsExecutableViaWhereAsync('git', '[Git]');
+ if (whereGitPath) {
+ const validation = await this.validateGitAsync(whereGitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: whereGitPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using Windows Git: ${whereGitPath}`,
+ };
+ }
+ }
+
+ const windowsPaths = await getWindowsExecutablePathsAsync(WINDOWS_GIT_PATHS, '[Git]');
+ for (const winGitPath of windowsPaths) {
+ const validation = await this.validateGitAsync(winGitPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: winGitPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using Windows Git: ${winGitPath}`,
+ };
+ }
+ }
+ }
+
+ // 5. Not found
+ return {
+ found: false,
+ source: 'fallback',
+ message: 'Git not found in standard locations. Using fallback "git".',
+ };
+ }
+
+ /**
+ * Detect GitHub CLI asynchronously (non-blocking)
+ *
+ * Same detection logic as detectGitHubCLI but uses async validation.
+ *
+ * @returns Promise resolving to detection result
+ */
+  private async detectGitHubCLIAsync(): Promise<ToolDetectionResult> {
+ // 1. User configuration
+ if (this.userConfig.githubCLIPath) {
+ if (isWrongPlatformPath(this.userConfig.githubCLIPath)) {
+ console.warn(
+ `[GitHub CLI] User-configured path is from different platform, ignoring: ${this.userConfig.githubCLIPath}`
+ );
+ } else {
+ const validation = await this.validateGitHubCLIAsync(this.userConfig.githubCLIPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: this.userConfig.githubCLIPath,
+ version: validation.version,
+ source: 'user-config',
+ message: `Using user-configured GitHub CLI: ${this.userConfig.githubCLIPath}`,
+ };
+ }
+ console.warn(`[GitHub CLI] User-configured path invalid: ${validation.message}`);
+ }
+ }
+
+ // 2. Homebrew (macOS)
+ if (process.platform === 'darwin') {
+ const homebrewPaths = [
+ '/opt/homebrew/bin/gh',
+ '/usr/local/bin/gh',
+ ];
+
+ for (const ghPath of homebrewPaths) {
+ if (await existsAsync(ghPath)) {
+ const validation = await this.validateGitHubCLIAsync(ghPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: ghPath,
+ version: validation.version,
+ source: 'homebrew',
+ message: `Using Homebrew GitHub CLI: ${ghPath}`,
+ };
+ }
+ }
+ }
+ }
+
+ // 3. System PATH (augmented)
+ const ghPath = await findExecutableAsync('gh');
+ if (ghPath) {
+ const validation = await this.validateGitHubCLIAsync(ghPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: ghPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using system GitHub CLI: ${ghPath}`,
+ };
+ }
+ }
+
+ // 4. Windows Program Files
+ if (process.platform === 'win32') {
+ const windowsPaths = [
+ 'C:\\Program Files\\GitHub CLI\\gh.exe',
+ 'C:\\Program Files (x86)\\GitHub CLI\\gh.exe',
+ ];
+
+ for (const winGhPath of windowsPaths) {
+ if (await existsAsync(winGhPath)) {
+ const validation = await this.validateGitHubCLIAsync(winGhPath);
+ if (validation.valid) {
+ return {
+ found: true,
+ path: winGhPath,
+ version: validation.version,
+ source: 'system-path',
+ message: `Using Windows GitHub CLI: ${winGhPath}`,
+ };
+ }
+ }
+ }
+ }
+
+ // 5. Not found
+ return {
+ found: false,
+ source: 'fallback',
+ message: 'GitHub CLI (gh) not found. Install from https://cli.github.com',
+ };
+ }
+
+ /**
+ * Get bundled Python path for packaged apps
+ *
+ * Only available in packaged Electron apps where Python is bundled
+ * in the resources directory.
+ *
+ * @returns Path to bundled Python or null if not found
+ */
+ private getBundledPythonPath(): string | null {
+ if (!app.isPackaged) {
+ return null;
+ }
+
+ const resourcesPath = process.resourcesPath;
+ const isWindows = process.platform === 'win32';
+
+ const pythonPath = isWindows
+ ? path.join(resourcesPath, 'python', 'python.exe')
+ : path.join(resourcesPath, 'python', 'bin', 'python3');
+
+ return existsSync(pythonPath) ? pythonPath : null;
+ }
+
+ /**
+ * Find Homebrew Python on macOS
+ * Delegates to shared utility function.
+ *
+ * @returns Path to Homebrew Python or null if not found
+ */
+ private findHomebrewPython(): string | null {
+ return findHomebrewPythonUtil(
+ (pythonPath) => this.validatePython(pythonPath),
+ '[CLI Tools]'
+ );
+ }
+
+ /**
+ * Clear cache manually
+ *
+ * Useful for testing or forcing re-detection.
+ * Normally not needed as cache is cleared automatically on settings change.
+ */
+ clearCache(): void {
+ this.cache.clear();
+ console.warn('[CLI Tools] Cache cleared');
+ }
+
+ /**
+ * Get tool detection info for diagnostics
+ *
+ * Performs fresh detection without using cache.
+ * Useful for Settings UI to show current detection status.
+ *
+ * @param tool - The tool to get detection info for
+ * @returns Detection result with full metadata
+ */
+ getToolInfo(tool: CLITool): ToolDetectionResult {
+ return this.detectToolPath(tool);
+ }
+}
+
+// Singleton instance
+const cliToolManager = new CLIToolManager();
+
+/**
+ * Get the path for a CLI tool
+ *
+ * Convenience function for accessing the tool manager singleton.
+ * Uses cached path if available, otherwise auto-detects.
+ *
+ * @param tool - The CLI tool to get the path for
+ * @returns The resolved path to the tool executable
+ *
+ * @example
+ * ```typescript
+ * import { getToolPath } from './cli-tool-manager';
+ *
+ * const pythonPath = getToolPath('python');
+ * const gitPath = getToolPath('git');
+ * const ghPath = getToolPath('gh');
+ *
+ * execSync(`${gitPath} status`, { cwd: projectPath });
+ * ```
+ */
+export function getToolPath(tool: CLITool): string {
+ return cliToolManager.getToolPath(tool);
+}
+
+/**
+ * Configure CLI tools with user settings
+ *
+ * Call this when user updates CLI tool paths in Settings.
+ * Clears cache to force re-detection with new configuration.
+ *
+ * @param config - User configuration for CLI tool paths
+ *
+ * @example
+ * ```typescript
+ * import { configureTools } from './cli-tool-manager';
+ *
+ * // When settings are loaded or updated
+ * configureTools({
+ * pythonPath: settings.pythonPath,
+ * gitPath: settings.gitPath,
+ * githubCLIPath: settings.githubCLIPath,
+ * });
+ * ```
+ */
+export function configureTools(config: ToolConfig): void {
+ cliToolManager.configure(config);
}
/**
@@ -951,3 +1752,52 @@ export function clearToolCache(): void {
export function isPathFromWrongPlatform(pathStr: string | undefined): boolean {
return isWrongPlatformPath(pathStr);
}
+
+// ============================================================================
+// ASYNC EXPORTS - Non-blocking alternatives for Electron main process
+// ============================================================================
+
+/**
+ * Get the path for a CLI tool asynchronously (non-blocking)
+ *
+ * Safe to call from Electron main process without blocking the event loop.
+ * Uses cached path if available, otherwise detects asynchronously.
+ *
+ * @param tool - The CLI tool to get the path for
+ * @returns Promise resolving to the tool path
+ *
+ * @example
+ * ```typescript
+ * import { getToolPathAsync } from './cli-tool-manager';
+ *
+ * const claudePath = await getToolPathAsync('claude');
+ * ```
+ */
+export async function getToolPathAsync(tool: CLITool): Promise<string> {
+ return cliToolManager.getToolPathAsync(tool);
+}
+
+/**
+ * Pre-warm the CLI tool cache asynchronously
+ *
+ * Call this during app startup to detect tools in the background.
+ * Subsequent calls to getToolPath/getToolPathAsync will use cached values.
+ *
+ * @param tools - Array of tools to pre-warm (defaults to ['claude'])
+ *
+ * @example
+ * ```typescript
+ * import { preWarmToolCache } from './cli-tool-manager';
+ *
+ * // In app startup
+ * app.whenReady().then(() => {
+ * // ... setup code ...
+ * preWarmToolCache(['claude', 'git', 'gh']);
+ * });
+ * ```
+ */
+export async function preWarmToolCache(tools: CLITool[] = ['claude']): Promise<void> {
+ console.warn('[CLI Tools] Pre-warming cache for:', tools.join(', '));
+ await Promise.all(tools.map(tool => cliToolManager.getToolPathAsync(tool)));
+ console.warn('[CLI Tools] Cache pre-warming complete');
+}
diff --git a/apps/frontend/src/main/env-utils.ts b/apps/frontend/src/main/env-utils.ts
index 9a1325ce15..01972d6af0 100644
--- a/apps/frontend/src/main/env-utils.ts
+++ b/apps/frontend/src/main/env-utils.ts
@@ -12,7 +12,32 @@
import * as os from 'os';
import * as path from 'path';
import * as fs from 'fs';
-import { execFileSync } from 'child_process';
+import { promises as fsPromises } from 'fs';
+import { execFileSync, execFile } from 'child_process';
+import { promisify } from 'util';
+
+const execFileAsync = promisify(execFile);
+
+/**
+ * Check if a path exists asynchronously (non-blocking)
+ *
+ * Uses fs.promises.access which is non-blocking, unlike fs.existsSync.
+ *
+ * @param filePath - The path to check
+ * @returns Promise resolving to true if path exists, false otherwise
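+ *
+ * @example
+ * A minimal sketch (the path is illustrative):
+ * ```typescript
+ * if (await existsAsync('/opt/homebrew/bin/git')) {
+ *   // safe to validate this candidate
+ * }
+ * ```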
+ */
+async function existsAsync(filePath: string): Promise<boolean> {
+ try {
+ await fsPromises.access(filePath);
+ return true;
+ } catch {
+ return false;
+ }
+}
+
+// Cache for npm global prefix to avoid repeated async calls
+let npmGlobalPrefixCache: string | null | undefined = undefined;
+let npmGlobalPrefixCachePromise: Promise<string | null> | null = null;
/**
* Get npm global prefix directory dynamically
@@ -30,10 +55,12 @@ function getNpmGlobalPrefix(): string | null {
// On Windows, use npm.cmd for proper command resolution
const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm';
- const rawPrefix = execFileSync(npmCommand, ['config', 'get', 'prefix'], {
+ // Use --location=global to bypass workspace context and avoid ENOWORKSPACES error
+ const rawPrefix = execFileSync(npmCommand, ['config', 'get', 'prefix', '--location=global'], {
encoding: 'utf-8',
timeout: 3000,
windowsHide: true,
+ cwd: os.homedir(), // Run from home dir to avoid ENOWORKSPACES error in monorepos
shell: process.platform === 'win32', // Enable shell on Windows for .cmd resolution
}).trim();
@@ -60,19 +87,22 @@ function getNpmGlobalPrefix(): string | null {
* Common binary directories that should be in PATH
* These are locations where commonly used tools are installed
*/
-const COMMON_BIN_PATHS: Record<string, string[]> = {
+export const COMMON_BIN_PATHS: Record<string, string[]> = {
darwin: [
'/opt/homebrew/bin', // Apple Silicon Homebrew
'/usr/local/bin', // Intel Homebrew / system
+ '/usr/local/share/dotnet', // .NET SDK
'/opt/homebrew/sbin', // Apple Silicon Homebrew sbin
'/usr/local/sbin', // Intel Homebrew sbin
'~/.local/bin', // User-local binaries (Claude CLI)
+ '~/.dotnet/tools', // .NET global tools
],
linux: [
'/usr/local/bin',
'/usr/bin', // System binaries (Python, etc.)
'/snap/bin', // Snap packages
'~/.local/bin', // User-local binaries
+ '~/.dotnet/tools', // .NET global tools
'/usr/sbin', // System admin binaries
],
win32: [
@@ -82,6 +112,77 @@ const COMMON_BIN_PATHS: Record<string, string[]> = {
],
};
+/**
+ * Essential system directories that must always be in PATH
+ * Required for core system functionality (e.g., /usr/bin/security for Keychain access)
+ */
+const ESSENTIAL_SYSTEM_PATHS: string[] = ['/usr/bin', '/bin', '/usr/sbin', '/sbin'];
+
+/**
+ * Get expanded platform paths for PATH augmentation
+ *
+ * Shared helper used by both sync and async getAugmentedEnv functions.
+ * Expands home directory (~) in paths and returns the list of candidate paths.
+ *
+ * @param additionalPaths - Optional additional paths to include
+ * @returns Array of expanded paths (without existence checking)
+ */
+function getExpandedPlatformPaths(additionalPaths?: string[]): string[] {
+ const platform = process.platform as 'darwin' | 'linux' | 'win32';
+ const homeDir = os.homedir();
+
+ // Get platform-specific paths and expand home directory
+ const platformPaths = COMMON_BIN_PATHS[platform] || [];
+ const expandedPaths = platformPaths.map(p =>
+ p.startsWith('~') ? p.replace('~', homeDir) : p
+ );
+
+ // Add user-requested additional paths (expanded)
+ if (additionalPaths) {
+ for (const p of additionalPaths) {
+ const expanded = p.startsWith('~') ? p.replace('~', homeDir) : p;
+ expandedPaths.push(expanded);
+ }
+ }
+
+ return expandedPaths;
+}
+
+/**
+ * Build augmented PATH by filtering existing paths
+ *
+ * Shared helper that takes candidate paths and a set of current PATH entries,
+ * returning only paths that should be added.
+ *
+ * @param candidatePaths - Array of paths to consider adding
+ * @param currentPathSet - Set of paths already in PATH
+ * @param existingPaths - Array of paths that actually exist on the filesystem
+ * @param npmPrefix - npm global prefix path (or null if not found)
+ * @returns Array of paths to prepend to PATH
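+ *
+ * @example
+ * A hypothetical call; all paths are illustrative:
+ * ```typescript
+ * buildPathsToAdd(
+ *   ['/opt/homebrew/bin', '/usr/local/bin'],   // candidates
+ *   new Set(['/usr/bin']),                     // already in PATH
+ *   new Set(['/opt/homebrew/bin']),            // exist on disk
+ *   '/usr/local/lib/node_modules/.bin'         // assumed npm prefix, not in existingPaths
+ * );
+ * // => ['/opt/homebrew/bin']  (missing dirs and the non-existent npm prefix are skipped)
+ * ```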
+ */
+function buildPathsToAdd(
+ candidatePaths: string[],
+  currentPathSet: Set<string>,
+  existingPaths: Set<string>,
+ npmPrefix: string | null
+): string[] {
+ const pathsToAdd: string[] = [];
+
+ // Add platform-specific paths that exist
+ for (const p of candidatePaths) {
+ if (!currentPathSet.has(p) && existingPaths.has(p)) {
+ pathsToAdd.push(p);
+ }
+ }
+
+ // Add npm global prefix if it exists
+ if (npmPrefix && !currentPathSet.has(npmPrefix) && existingPaths.has(npmPrefix)) {
+ pathsToAdd.push(npmPrefix);
+ }
+
+ return pathsToAdd;
+}
+
/**
* Get augmented environment with additional PATH entries
*
@@ -97,48 +198,44 @@ export function getAugmentedEnv(additionalPaths?: string[]): Record<string, string> {
- p.startsWith('~') ? p.replace('~', homeDir) : p
- );
+ // Get all candidate paths (platform + additional)
+ const candidatePaths = getExpandedPlatformPaths(additionalPaths);
- // Collect paths to add (only if they exist and aren't already in PATH)
- const currentPath = env.PATH || '';
- const currentPathSet = new Set(currentPath.split(pathSeparator));
+ // Ensure PATH has essential system directories when launched from Finder/Dock.
+ // When Electron launches from GUI (not terminal), PATH might be empty or minimal.
+ // The Claude Agent SDK needs /usr/bin/security to access macOS Keychain.
+ let currentPath = env.PATH || '';
- const pathsToAdd: string[] = [];
+ // On macOS/Linux, ensure basic system paths are always present
+ if (platform !== 'win32') {
+ const pathSetForEssentials = new Set(currentPath.split(pathSeparator).filter(Boolean));
+ const missingEssentials = ESSENTIAL_SYSTEM_PATHS.filter(p => !pathSetForEssentials.has(p));
- // Add platform-specific paths
- for (const p of expandedPaths) {
- if (!currentPathSet.has(p) && fs.existsSync(p)) {
- pathsToAdd.push(p);
+ if (missingEssentials.length > 0) {
+ // Append essential paths if missing (append, not prepend, to respect user's PATH)
+ currentPath = currentPath
+ ? `${currentPath}${pathSeparator}${missingEssentials.join(pathSeparator)}`
+ : missingEssentials.join(pathSeparator);
}
}
- // Add npm global prefix dynamically (cross-platform: works with standard npm, nvm, nvm-windows)
+ // Collect paths to add (only if they exist and aren't already in PATH)
+ const currentPathSet = new Set(currentPath.split(pathSeparator).filter(Boolean));
+
+ // Check existence synchronously and build existing paths set
+ const existingPaths = new Set(candidatePaths.filter(p => fs.existsSync(p)));
+
+ // Get npm global prefix dynamically
const npmPrefix = getNpmGlobalPrefix();
- if (npmPrefix && !currentPathSet.has(npmPrefix) && fs.existsSync(npmPrefix)) {
- pathsToAdd.push(npmPrefix);
+ if (npmPrefix && fs.existsSync(npmPrefix)) {
+ existingPaths.add(npmPrefix);
}
- // Add user-requested additional paths
- if (additionalPaths) {
- for (const p of additionalPaths) {
- const expanded = p.startsWith('~') ? p.replace('~', homeDir) : p;
- if (!currentPathSet.has(expanded) && fs.existsSync(expanded)) {
- pathsToAdd.push(expanded);
- }
- }
- }
+ // Build final paths to add using shared helper
+ const pathsToAdd = buildPathsToAdd(candidatePaths, currentPathSet, existingPaths, npmPrefix);
// Prepend new paths to PATH (prepend so they take priority)
- if (pathsToAdd.length > 0) {
- env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator);
- }
+ env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator);
return env;
}
@@ -184,3 +281,160 @@ export function findExecutable(command: string): string | null {
export function isCommandAvailable(command: string): boolean {
return findExecutable(command) !== null;
}
+
+// ============================================================================
+// ASYNC VERSIONS - Non-blocking alternatives for Electron main process
+// ============================================================================
+
+/**
+ * Get npm global prefix directory asynchronously (non-blocking)
+ *
+ * Uses caching to avoid repeated subprocess calls. Safe to call from
+ * Electron main process without blocking the event loop.
+ *
+ * @returns Promise resolving to npm global binaries directory, or null
+ */
+async function getNpmGlobalPrefixAsync(): Promise<string | null> {
+ // Return cached value if available
+ if (npmGlobalPrefixCache !== undefined) {
+ return npmGlobalPrefixCache;
+ }
+
+ // If a fetch is already in progress, wait for it
+ if (npmGlobalPrefixCachePromise) {
+ return npmGlobalPrefixCachePromise;
+ }
+
+ // Start the async fetch
+ npmGlobalPrefixCachePromise = (async () => {
+ try {
+ const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm';
+
+ const { stdout } = await execFileAsync(npmCommand, ['config', 'get', 'prefix', '--location=global'], {
+ encoding: 'utf-8',
+ timeout: 3000,
+ windowsHide: true,
+ cwd: os.homedir(), // Run from home dir to avoid ENOWORKSPACES error in monorepos
+ shell: process.platform === 'win32',
+ });
+
+ const rawPrefix = stdout.trim();
+ if (!rawPrefix) {
+ npmGlobalPrefixCache = null;
+ return null;
+ }
+
+ const binPath = process.platform === 'win32'
+ ? rawPrefix
+ : path.join(rawPrefix, 'bin');
+
+ const normalizedPath = path.normalize(binPath);
+ npmGlobalPrefixCache = await existsAsync(normalizedPath) ? normalizedPath : null;
+ return npmGlobalPrefixCache;
+ } catch (error) {
+ console.warn(`[env-utils] Failed to get npm global prefix: ${error}`);
+ npmGlobalPrefixCache = null;
+ return null;
+ } finally {
+ npmGlobalPrefixCachePromise = null;
+ }
+ })();
+
+ return npmGlobalPrefixCachePromise;
+}
+
+/**
+ * Get augmented environment asynchronously (non-blocking)
+ *
+ * Same as getAugmentedEnv but uses async npm prefix detection.
+ * Safe to call from Electron main process without blocking.
+ *
+ * @param additionalPaths - Optional array of additional paths to include
+ * @returns Promise resolving to environment object with augmented PATH
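+ *
+ * @example
+ * A minimal usage sketch; the extra path and the spawn call are illustrative only:
+ * ```typescript
+ * import { spawn } from 'child_process';
+ *
+ * const env = await getAugmentedEnvAsync(['~/.cargo/bin']);
+ * const child = spawn('claude', ['--version'], { env });
+ * ```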
+ */
+export async function getAugmentedEnvAsync(additionalPaths?: string[]): Promise<Record<string, string>> {
+  const env = { ...process.env } as Record<string, string>;
+ const platform = process.platform as 'darwin' | 'linux' | 'win32';
+ const pathSeparator = platform === 'win32' ? ';' : ':';
+
+ // Get all candidate paths (platform + additional)
+ const candidatePaths = getExpandedPlatformPaths(additionalPaths);
+
+ // Ensure essential system paths are present (for macOS Keychain access)
+ let currentPath = env.PATH || '';
+
+ if (platform !== 'win32') {
+ const pathSetForEssentials = new Set(currentPath.split(pathSeparator).filter(Boolean));
+ const missingEssentials = ESSENTIAL_SYSTEM_PATHS.filter(p => !pathSetForEssentials.has(p));
+
+ if (missingEssentials.length > 0) {
+ currentPath = currentPath
+ ? `${currentPath}${pathSeparator}${missingEssentials.join(pathSeparator)}`
+ : missingEssentials.join(pathSeparator);
+ }
+ }
+
+ // Collect paths to add (only if they exist and aren't already in PATH)
+ const currentPathSet = new Set(currentPath.split(pathSeparator).filter(Boolean));
+
+ // Check existence asynchronously in parallel for performance
+ const pathChecks = await Promise.all(
+ candidatePaths.map(async (p) => ({ path: p, exists: await existsAsync(p) }))
+ );
+ const existingPaths = new Set(
+ pathChecks.filter(({ exists }) => exists).map(({ path: p }) => p)
+ );
+
+ // Get npm global prefix dynamically (async - non-blocking)
+ const npmPrefix = await getNpmGlobalPrefixAsync();
+ if (npmPrefix && await existsAsync(npmPrefix)) {
+ existingPaths.add(npmPrefix);
+ }
+
+ // Build final paths to add using shared helper
+ const pathsToAdd = buildPathsToAdd(candidatePaths, currentPathSet, existingPaths, npmPrefix);
+
+ // Prepend new paths to PATH (prepend so they take priority)
+ env.PATH = [...pathsToAdd, currentPath].filter(Boolean).join(pathSeparator);
+
+ return env;
+}
+
+/**
+ * Find the full path to an executable asynchronously (non-blocking)
+ *
+ * Same as findExecutable but uses async environment augmentation.
+ *
+ * @param command - The command name to find (e.g., 'gh', 'git')
+ * @returns Promise resolving to the full path to the executable, or null
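+ *
+ * @example
+ * A minimal usage sketch:
+ * ```typescript
+ * const ghPath = await findExecutableAsync('gh');
+ * if (ghPath) {
+ *   console.warn(`[env-utils] gh resolved to ${ghPath}`);
+ * }
+ * ```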
+ */
+export async function findExecutableAsync(command: string): Promise<string | null> {
+ const env = await getAugmentedEnvAsync();
+ const pathSeparator = process.platform === 'win32' ? ';' : ':';
+ const pathDirs = (env.PATH || '').split(pathSeparator);
+
+ const extensions = process.platform === 'win32'
+ ? ['.exe', '.cmd', '.bat', '.ps1', '']
+ : [''];
+
+ for (const dir of pathDirs) {
+ for (const ext of extensions) {
+ const fullPath = path.join(dir, command + ext);
+ if (await existsAsync(fullPath)) {
+ return fullPath;
+ }
+ }
+ }
+
+ return null;
+}
+
+/**
+ * Clear the npm global prefix cache
+ *
+ * Call this if npm configuration changes and you need fresh detection.
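+ *
+ * @example
+ * A minimal sketch; the settings-change trigger is hypothetical:
+ * ```typescript
+ * // After the user changes their npm setup in Settings
+ * clearNpmPrefixCache();
+ * const env = await getAugmentedEnvAsync(); // re-detects the npm global prefix
+ * ```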
+ */
+export function clearNpmPrefixCache(): void {
+ npmGlobalPrefixCache = undefined;
+ npmGlobalPrefixCachePromise = null;
+}
diff --git a/apps/frontend/src/main/index.ts b/apps/frontend/src/main/index.ts
index 7cd856a0fe..8ee2eaf76c 100644
--- a/apps/frontend/src/main/index.ts
+++ b/apps/frontend/src/main/index.ts
@@ -1,6 +1,28 @@
-import { app, BrowserWindow, shell, nativeImage } from 'electron';
+// Load .env file FIRST before any other imports that might use process.env
+import { config } from 'dotenv';
+import { resolve, dirname } from 'path';
+import { existsSync } from 'fs';
+
+// Load .env from apps/frontend directory
+// In development: __dirname is out/main (compiled), so go up 2 levels
+// In production: app resources directory
+const possibleEnvPaths = [
+ resolve(__dirname, '../../.env'), // Development: out/main -> apps/frontend/.env
+ resolve(__dirname, '../../../.env'), // Alternative: might be in different location
+ resolve(process.cwd(), 'apps/frontend/.env'), // Fallback: from workspace root
+];
+
+for (const envPath of possibleEnvPaths) {
+ if (existsSync(envPath)) {
+ config({ path: envPath });
+ console.log(`[dotenv] Loaded environment from: ${envPath}`);
+ break;
+ }
+}
+
+import { app, BrowserWindow, shell, nativeImage, session, screen } from 'electron';
import { join } from 'path';
-import { accessSync, readFileSync, writeFileSync } from 'fs';
+import { accessSync, readFileSync, writeFileSync, rmSync } from 'fs';
import { electronApp, optimizer, is } from '@electron-toolkit/utils';
import { setupIpcHandlers } from './ipc-setup';
import { AgentManager } from './agent';
@@ -12,11 +34,34 @@ import { initializeAppUpdater } from './app-updater';
import { DEFAULT_APP_SETTINGS } from '../shared/constants';
import { readSettingsFile } from './settings-utils';
import { setupErrorLogging } from './app-logger';
+import { initSentryMain } from './sentry';
+import { preWarmToolCache } from './cli-tool-manager';
+import { initializeClaudeProfileManager } from './claude-profile-manager';
import type { AppSettings } from '../shared/types';
+// ─────────────────────────────────────────────────────────────────────────────
+// Window sizing constants
+// ─────────────────────────────────────────────────────────────────────────────
+/** Preferred window width on startup */
+const WINDOW_PREFERRED_WIDTH: number = 1400;
+/** Preferred window height on startup */
+const WINDOW_PREFERRED_HEIGHT: number = 900;
+/** Absolute minimum window width (supports high DPI displays with scaling) */
+const WINDOW_MIN_WIDTH: number = 800;
+/** Absolute minimum window height (supports high DPI displays with scaling) */
+const WINDOW_MIN_HEIGHT: number = 500;
+/** Margin from screen edges to avoid edge-to-edge windows */
+const WINDOW_SCREEN_MARGIN: number = 20;
+/** Default screen dimensions used as fallback when screen.getPrimaryDisplay() fails */
+const DEFAULT_SCREEN_WIDTH: number = 1920;
+const DEFAULT_SCREEN_HEIGHT: number = 1080;
+
// Setup error logging early (captures uncaught exceptions)
setupErrorLogging();
+// Initialize Sentry for error tracking (respects user's sentryEnabled setting)
+initSentryMain();
+
/**
* Load app settings synchronously (for use during startup).
* This is a simple merge with defaults - no migrations or auto-detection.
@@ -26,6 +71,32 @@ function loadSettingsSync(): AppSettings {
return { ...DEFAULT_APP_SETTINGS, ...savedSettings } as AppSettings;
}
+/**
+ * Clean up stale update metadata files from the redundant source updater system.
+ *
+ * The old "source updater" wrote .update-metadata.json files that could persist
+ * across app updates and cause version display desync. This cleanup ensures
+ * we use the actual bundled version from app.getVersion().
+ */
+function cleanupStaleUpdateMetadata(): void {
+ const userData = app.getPath('userData');
+ const stalePaths = [
+ join(userData, 'auto-claude-source'),
+ join(userData, 'backend-source'),
+ ];
+
+ for (const stalePath of stalePaths) {
+ if (existsSync(stalePath)) {
+ try {
+ rmSync(stalePath, { recursive: true, force: true });
+ console.warn(`[main] Cleaned up stale update metadata: ${stalePath}`);
+ } catch (e) {
+ console.warn(`[main] Failed to clean up stale metadata at ${stalePath}:`, e);
+ }
+ }
+ }
+}
+
// Get icon path based on platform
function getIconPath(): string {
// In dev mode, __dirname is out/main, so we go up to project root then into resources
@@ -54,12 +125,51 @@ let agentManager: AgentManager | null = null;
let terminalManager: TerminalManager | null = null;
function createWindow(): void {
+ // Get the primary display's work area (accounts for taskbar, dock, etc.)
+ // Wrapped in try/catch to handle potential failures with fallback to safe defaults
+ let workAreaSize: { width: number; height: number };
+ try {
+ const display = screen.getPrimaryDisplay();
+ // Validate the returned object has expected structure with valid dimensions
+ if (
+ display &&
+ display.workAreaSize &&
+ typeof display.workAreaSize.width === 'number' &&
+ typeof display.workAreaSize.height === 'number' &&
+ display.workAreaSize.width > 0 &&
+ display.workAreaSize.height > 0
+ ) {
+ workAreaSize = display.workAreaSize;
+ } else {
+ console.error(
+ '[main] screen.getPrimaryDisplay() returned unexpected structure:',
+ JSON.stringify(display)
+ );
+ workAreaSize = { width: DEFAULT_SCREEN_WIDTH, height: DEFAULT_SCREEN_HEIGHT };
+ }
+ } catch (error: unknown) {
+ console.error('[main] Failed to get primary display, using fallback dimensions:', error);
+ workAreaSize = { width: DEFAULT_SCREEN_WIDTH, height: DEFAULT_SCREEN_HEIGHT };
+ }
+
+ // Calculate available space with a small margin to avoid edge-to-edge windows
+ const availableWidth: number = workAreaSize.width - WINDOW_SCREEN_MARGIN;
+ const availableHeight: number = workAreaSize.height - WINDOW_SCREEN_MARGIN;
+
+ // Calculate actual dimensions (preferred, but capped to margin-adjusted available space)
+ const width: number = Math.min(WINDOW_PREFERRED_WIDTH, availableWidth);
+ const height: number = Math.min(WINDOW_PREFERRED_HEIGHT, availableHeight);
+
+ // Ensure minimum dimensions don't exceed the actual initial window size
+ const minWidth: number = Math.min(WINDOW_MIN_WIDTH, width);
+ const minHeight: number = Math.min(WINDOW_MIN_HEIGHT, height);
+
// Create the browser window
mainWindow = new BrowserWindow({
- width: 1400,
- height: 900,
- minWidth: 1000,
- minHeight: 700,
+ width,
+ height,
+ minWidth,
+ minHeight,
show: false,
autoHideMenuBar: true,
titleBarStyle: 'hiddenInset',
@@ -110,11 +220,29 @@ if (process.platform === 'darwin') {
app.name = 'Auto Claude';
}
+// Fix Windows GPU cache permission errors (0x5 Access Denied)
+if (process.platform === 'win32') {
+ app.commandLine.appendSwitch('disable-gpu-shader-disk-cache');
+ app.commandLine.appendSwitch('disable-gpu-program-cache');
+ console.log('[main] Applied Windows GPU cache fixes');
+}
+
// Initialize the application
app.whenReady().then(() => {
// Set app user model id for Windows
electronApp.setAppUserModelId('com.autoclaude.ui');
+ // Clear cache on Windows to prevent permission errors from stale cache
+ if (process.platform === 'win32') {
+ session.defaultSession.clearCache()
+ .then(() => console.log('[main] Cleared cache on startup'))
+ .catch((err) => console.warn('[main] Failed to clear cache:', err));
+ }
+
+ // Clean up stale update metadata from the old source updater system
+ // This prevents version display desync after electron-updater installs a new version
+ cleanupStaleUpdateMetadata();
+
// Set dock icon on macOS
if (process.platform === 'darwin') {
const iconPath = getIconPath();
@@ -222,6 +350,23 @@ app.whenReady().then(() => {
// Create window
createWindow();
+ // Pre-warm CLI tool cache in background (non-blocking)
+  // This ensures CLI detection is done before the user needs it
+ // Include all commonly used tools to prevent sync blocking on first use
+ setImmediate(() => {
+ preWarmToolCache(['claude', 'git', 'gh', 'python']).catch((error) => {
+ console.warn('[main] Failed to pre-warm CLI cache:', error);
+ });
+ });
+
+ // Pre-initialize Claude profile manager in background (non-blocking)
+  // This ensures profile data is loaded before the user clicks "Start Claude Code"
+ setImmediate(() => {
+ initializeClaudeProfileManager().catch((error) => {
+ console.warn('[main] Failed to pre-initialize profile manager:', error);
+ });
+ });
+
// Initialize usage monitoring after window is created
if (mainWindow) {
// Setup event forwarding from usage monitor to renderer
diff --git a/apps/frontend/src/main/insights/config.ts b/apps/frontend/src/main/insights/config.ts
index 0ca1609c13..97e8a9a28d 100644
--- a/apps/frontend/src/main/insights/config.ts
+++ b/apps/frontend/src/main/insights/config.ts
@@ -1,9 +1,12 @@
import path from 'path';
import { existsSync, readFileSync } from 'fs';
-import { app } from 'electron';
import { getProfileEnv } from '../rate-limit-detector';
+import { getAPIProfileEnv } from '../services/profile';
+import { getOAuthModeClearVars } from '../agent/env-utils';
+import { pythonEnvManager, getConfiguredPythonPath } from '../python-env-manager';
import { getValidatedPythonPath } from '../python-detector';
-import { getConfiguredPythonPath } from '../python-env-manager';
+import { getAugmentedEnv } from '../env-utils';
+import { getEffectiveSourcePath } from '../updater/path-resolver';
/**
* Configuration manager for insights service
@@ -40,24 +43,23 @@ export class InsightsConfig {
/**
* Get the auto-claude source path (detects automatically if not configured)
+ * Uses getEffectiveSourcePath() which handles userData override for user-updated backend
*/
getAutoBuildSourcePath(): string | null {
if (this.autoBuildSourcePath && existsSync(this.autoBuildSourcePath)) {
return this.autoBuildSourcePath;
}
- const possiblePaths = [
- // Apps structure: from out/main -> apps/backend
- path.resolve(__dirname, '..', '..', '..', 'backend'),
- path.resolve(app.getAppPath(), '..', 'backend'),
- path.resolve(process.cwd(), 'apps', 'backend')
- ];
-
- for (const p of possiblePaths) {
- if (existsSync(p) && existsSync(path.join(p, 'runners', 'spec_runner.py'))) {
- return p;
- }
+ // Use shared path resolver which handles:
+ // 1. User settings (autoBuildPath)
+ // 2. userData override (backend-source) for user-updated backend
+ // 3. Bundled backend (process.resourcesPath/backend)
+ // 4. Development paths
+ const effectivePath = getEffectiveSourcePath();
+ if (existsSync(effectivePath) && existsSync(path.join(effectivePath, 'runners', 'spec_runner.py'))) {
+ return effectivePath;
}
+
return null;
}
@@ -104,17 +106,51 @@ export class InsightsConfig {
* Get complete environment for process execution
* Includes system env, auto-claude env, and active Claude profile
*/
-  getProcessEnv(): Record<string, string> {
+  async getProcessEnv(): Promise<Record<string, string>> {
const autoBuildEnv = this.loadAutoBuildEnv();
const profileEnv = getProfileEnv();
+ const apiProfileEnv = await getAPIProfileEnv();
+ const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+ const pythonEnv = pythonEnvManager.getPythonEnv();
+ const autoBuildSource = this.getAutoBuildSourcePath();
+ const pythonPathParts = (pythonEnv.PYTHONPATH ?? '')
+ .split(path.delimiter)
+ .map((entry) => entry.trim())
+ .filter(Boolean)
+ .map((entry) => path.resolve(entry));
+
+ if (autoBuildSource) {
+ const normalizedAutoBuildSource = path.resolve(autoBuildSource);
+ const autoBuildComparator = process.platform === 'win32'
+ ? normalizedAutoBuildSource.toLowerCase()
+ : normalizedAutoBuildSource;
+ const hasAutoBuildSource = pythonPathParts.some((entry) => {
+ const candidate = process.platform === 'win32' ? entry.toLowerCase() : entry;
+ return candidate === autoBuildComparator;
+ });
+
+ if (!hasAutoBuildSource) {
+ pythonPathParts.push(normalizedAutoBuildSource);
+ }
+ }
+
+ const combinedPythonPath = pythonPathParts.join(path.delimiter);
+
+ // Use getAugmentedEnv() to ensure common tool paths (claude, dotnet, etc.)
+ // are available even when app is launched from Finder/Dock.
+ const augmentedEnv = getAugmentedEnv();
return {
-      ...process.env as Record<string, string>,
+ ...augmentedEnv,
+ ...pythonEnv, // Include PYTHONPATH for bundled site-packages
...autoBuildEnv,
+ ...oauthModeClearVars,
...profileEnv,
+ ...apiProfileEnv,
PYTHONUNBUFFERED: '1',
PYTHONIOENCODING: 'utf-8',
- PYTHONUTF8: '1'
+ PYTHONUTF8: '1',
+ ...(combinedPythonPath ? { PYTHONPATH: combinedPythonPath } : {})
};
}
}
diff --git a/apps/frontend/src/main/insights/insights-executor.ts b/apps/frontend/src/main/insights/insights-executor.ts
index d5565620fe..0c349b3480 100644
--- a/apps/frontend/src/main/insights/insights-executor.ts
+++ b/apps/frontend/src/main/insights/insights-executor.ts
@@ -85,7 +85,7 @@ export class InsightsExecutor extends EventEmitter {
} as InsightsChatStatus);
// Get process environment
- const processEnv = this.config.getProcessEnv();
+ const processEnv = await this.config.getProcessEnv();
// Write conversation history to temp file to avoid Windows command-line length limit
const historyFile = path.join(
@@ -130,6 +130,7 @@ export class InsightsExecutor extends EventEmitter {
let suggestedTask: InsightsChatMessage['suggestedTask'] | undefined;
const toolsUsed: InsightsToolUsage[] = [];
let allInsightsOutput = '';
+ let stderrOutput = '';
proc.stdout?.on('data', (data: Buffer) => {
const text = data.toString();
@@ -159,8 +160,9 @@ export class InsightsExecutor extends EventEmitter {
proc.stderr?.on('data', (data: Buffer) => {
const text = data.toString();
- // Collect stderr for rate limit detection too
+ // Collect stderr for rate limit detection and error reporting
allInsightsOutput = (allInsightsOutput + text).slice(-10000);
+ stderrOutput = (stderrOutput + text).slice(-2000);
console.error('[Insights]', text);
});
@@ -196,7 +198,11 @@ export class InsightsExecutor extends EventEmitter {
toolsUsed
});
} else {
- const error = `Process exited with code ${code}`;
+ // Include stderr output in error message for debugging
+ const stderrSummary = stderrOutput.trim()
+ ? `\n\nError output:\n${stderrOutput.slice(-500)}`
+ : '';
+ const error = `Process exited with code ${code}${stderrSummary}`;
this.emit('stream-chunk', projectId, {
type: 'error',
error
diff --git a/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts b/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts
index cbe4a67b68..8a87872445 100644
--- a/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/agent-events-handlers.ts
@@ -1,6 +1,8 @@
import type { BrowserWindow } from 'electron';
import path from 'path';
-import { IPC_CHANNELS, getSpecsDir, AUTO_BUILD_PATHS } from '../../shared/constants';
+import { existsSync } from 'fs';
+import { IPC_CHANNELS, AUTO_BUILD_PATHS, getSpecsDir } from '../../shared/constants';
+import { wouldPhaseRegress, isTerminalPhase, isValidExecutionPhase, type ExecutionPhase } from '../../shared/constants/phase-protocol';
import type {
SDKRateLimitInfo,
Task,
@@ -15,6 +17,56 @@ import { fileWatcher } from '../file-watcher';
import { projectStore } from '../project-store';
import { notificationService } from '../notification-service';
import { persistPlanStatusSync, getPlanPath } from './task/plan-file-utils';
+import { findTaskWorktree } from '../worktree-paths';
+import { findTaskAndProject } from './task/shared';
+
+
+/**
+ * Validates status transitions to prevent invalid state changes.
+ * FIX (ACS-55, ACS-71): Adds guardrails against bad status transitions.
+ * FIX (PR Review): Uses comprehensive wouldPhaseRegress() utility instead of hardcoded checks.
+ *
+ * @param task - The current task (may be undefined if not found)
+ * @param newStatus - The proposed new status
+ * @param phase - The execution phase that triggered this transition
+ * @returns true if transition is valid, false if it should be blocked
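+ *
+ * @example
+ * An illustrative call (only the fields the check reads are shown):
+ * ```typescript
+ * // Blocked: a task with no subtasks cannot move to human_review yet
+ * validateStatusTransition({ ...task, subtasks: [] } as Task, 'human_review', 'planning'); // => false
+ * ```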
+ */
+function validateStatusTransition(
+ task: Task | undefined,
+ newStatus: TaskStatus,
+ phase: string
+): boolean {
+ // Can't validate without task data - allow the transition
+ if (!task) return true;
+
+ // Don't allow human_review without subtasks
+ // This prevents tasks from jumping to review before planning is complete
+ if (newStatus === 'human_review' && (!task.subtasks || task.subtasks.length === 0)) {
+ console.warn(`[validateStatusTransition] Blocking human_review - task ${task.id} has no subtasks (phase: ${phase})`);
+ return false;
+ }
+
+ // FIX (PR Review): Use comprehensive phase regression check instead of hardcoded checks
+ // This handles all phase regressions (qa_review→coding, complete→coding, etc.)
+ // not just the specific coding→planning case
+ const currentPhase = task.executionProgress?.phase;
+ if (currentPhase && isValidExecutionPhase(currentPhase) && isValidExecutionPhase(phase)) {
+ // Block transitions from terminal phases (complete/failed)
+ if (isTerminalPhase(currentPhase)) {
+ console.warn(`[validateStatusTransition] Blocking transition from terminal phase: ${currentPhase} for task ${task.id}`);
+ return false;
+ }
+
+ // Block any phase regression (going backwards in the workflow)
+ // Note: Cast phase to ExecutionPhase since isValidExecutionPhase() type guard doesn't narrow through function calls
+ if (wouldPhaseRegress(currentPhase, phase as ExecutionPhase)) {
+ console.warn(`[validateStatusTransition] Blocking phase regression: ${currentPhase} -> ${phase} for task ${task.id}`);
+ return false;
+ }
+ }
+
+ return true;
+}
/**
@@ -31,14 +83,18 @@ export function registerAgenteventsHandlers(
agentManager.on('log', (taskId: string, log: string) => {
const mainWindow = getMainWindow();
if (mainWindow) {
- mainWindow.webContents.send(IPC_CHANNELS.TASK_LOG, taskId, log);
+ // Include projectId for multi-project filtering (issue #723)
+ const { project } = findTaskAndProject(taskId);
+ mainWindow.webContents.send(IPC_CHANNELS.TASK_LOG, taskId, log, project?.id);
}
});
agentManager.on('error', (taskId: string, error: string) => {
const mainWindow = getMainWindow();
if (mainWindow) {
- mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error);
+ // Include projectId for multi-project filtering (issue #723)
+ const { project } = findTaskAndProject(taskId);
+ mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error, project?.id);
}
});
@@ -61,11 +117,15 @@ export function registerAgenteventsHandlers(
agentManager.on('exit', (taskId: string, code: number | null, processType: ProcessType) => {
const mainWindow = getMainWindow();
if (mainWindow) {
+ // Get project info early for multi-project filtering (issue #723)
+ const { project: exitProject } = findTaskAndProject(taskId);
+ const exitProjectId = exitProject?.id;
+
// Send final plan state to renderer BEFORE unwatching
// This ensures the renderer has the final subtask data (fixes 0/0 subtask bug)
const finalPlan = fileWatcher.getCurrentPlan(taskId);
if (finalPlan) {
- mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, finalPlan);
+ mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, finalPlan, exitProjectId);
}
fileWatcher.unwatch(taskId);
@@ -81,6 +141,12 @@ export function registerAgenteventsHandlers(
try {
const projects = projectStore.getProjects();
+ // IMPORTANT: Invalidate cache for all projects to ensure we get fresh data
+ // This prevents race conditions where cached task data has stale status
+ for (const p of projects) {
+ projectStore.invalidateTasksCache(p.id);
+ }
+
for (const p of projects) {
const tasks = projectStore.getTasks(p.id);
task = tasks.find((t) => t.id === taskId || t.specId === taskId);
@@ -92,42 +158,79 @@ export function registerAgenteventsHandlers(
if (task && project) {
const taskTitle = task.title || task.specId;
- const planPath = getPlanPath(project, task);
+ const mainPlanPath = getPlanPath(project, task);
+ const projectId = project.id; // Capture for closure
+
+ // Capture task values for closure
+ const taskSpecId = task.specId;
+ const projectPath = project.path;
+ const autoBuildPath = project.autoBuildPath;
// Use shared utility for persisting status (prevents race conditions)
+ // Persist to both main project AND worktree (if exists) for consistency
const persistStatus = (status: TaskStatus) => {
- const persisted = persistPlanStatusSync(planPath, status);
- if (persisted) {
- console.log(`[Task ${taskId}] Persisted status to plan: ${status}`);
+ // Persist to main project
+ const mainPersisted = persistPlanStatusSync(mainPlanPath, status, projectId);
+ if (mainPersisted) {
+ console.warn(`[Task ${taskId}] Persisted status to main plan: ${status}`);
+ }
+
+ // Also persist to worktree if it exists
+ const worktreePath = findTaskWorktree(projectPath, taskSpecId);
+ if (worktreePath) {
+ const specsBaseDir = getSpecsDir(autoBuildPath);
+ const worktreePlanPath = path.join(
+ worktreePath,
+ specsBaseDir,
+ taskSpecId,
+ AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN
+ );
+ if (existsSync(worktreePlanPath)) {
+ const worktreePersisted = persistPlanStatusSync(worktreePlanPath, status, projectId);
+ if (worktreePersisted) {
+ console.warn(`[Task ${taskId}] Persisted status to worktree plan: ${status}`);
+ }
+ }
}
};
if (code === 0) {
notificationService.notifyReviewNeeded(taskTitle, project.id, taskId);
-
+
// Fallback: Ensure status is updated even if COMPLETE phase event was missed
// This prevents tasks from getting stuck in ai_review status
- // Uses inverted logic to also handle tasks with no subtasks (treats them as complete)
+ // FIX (ACS-71): Only move to human_review if subtasks exist AND are all completed
+ // If no subtasks exist, the task is still in planning and shouldn't move to human_review
const isActiveStatus = task.status === 'in_progress' || task.status === 'ai_review';
- const hasIncompleteSubtasks = task.subtasks && task.subtasks.length > 0 &&
+ const hasSubtasks = task.subtasks && task.subtasks.length > 0;
+ const hasIncompleteSubtasks = hasSubtasks &&
task.subtasks.some((s) => s.status !== 'completed');
-
- if (isActiveStatus && !hasIncompleteSubtasks) {
- console.log(`[Task ${taskId}] Fallback: Moving to human_review (process exited successfully)`);
+
+ if (isActiveStatus && hasSubtasks && !hasIncompleteSubtasks) {
+ // All subtasks completed - safe to move to human_review
+ console.warn(`[Task ${taskId}] Fallback: Moving to human_review (process exited successfully, all ${task.subtasks.length} subtasks completed)`);
persistStatus('human_review');
+ // Include projectId for multi-project filtering (issue #723)
mainWindow.webContents.send(
IPC_CHANNELS.TASK_STATUS_CHANGE,
taskId,
- 'human_review' as TaskStatus
+ 'human_review' as TaskStatus,
+ projectId
);
+ } else if (isActiveStatus && !hasSubtasks) {
+ // No subtasks yet - task is still in planning phase, don't change status
+ // This prevents the bug where tasks jump to human_review before planning completes
+ console.warn(`[Task ${taskId}] Process exited but no subtasks created yet - keeping current status (${task.status})`);
}
} else {
notificationService.notifyTaskFailed(taskTitle, project.id, taskId);
persistStatus('human_review');
+ // Include projectId for multi-project filtering (issue #723)
mainWindow.webContents.send(
IPC_CHANNELS.TASK_STATUS_CHANGE,
taskId,
- 'human_review' as TaskStatus
+ 'human_review' as TaskStatus,
+ projectId
);
}
}
@@ -140,7 +243,12 @@ export function registerAgenteventsHandlers(
agentManager.on('execution-progress', (taskId: string, progress: ExecutionProgressData) => {
const mainWindow = getMainWindow();
if (mainWindow) {
- mainWindow.webContents.send(IPC_CHANNELS.TASK_EXECUTION_PROGRESS, taskId, progress);
+ // Use shared helper to find task and project (issue #723 - deduplicate lookup)
+ const { task, project } = findTaskAndProject(taskId);
+ const taskProjectId = project?.id;
+
+ // Include projectId in execution progress event for multi-project filtering
+ mainWindow.webContents.send(IPC_CHANNELS.TASK_EXECUTION_PROGRESS, taskId, progress, taskProjectId);
const phaseToStatus: Record<string, TaskStatus | null> = {
'idle': null,
@@ -153,31 +261,47 @@ export function registerAgenteventsHandlers(
};
const newStatus = phaseToStatus[progress.phase];
- if (newStatus) {
+ // FIX (ACS-55, ACS-71): Validate status transition before sending/persisting
+ if (newStatus && validateStatusTransition(task, newStatus, progress.phase)) {
+ // Include projectId in status change event for multi-project filtering
mainWindow.webContents.send(
IPC_CHANNELS.TASK_STATUS_CHANGE,
taskId,
- newStatus
+ newStatus,
+ taskProjectId
);
- // CRITICAL: Persist status to plan file to prevent flip-flop on task list refresh
+ // CRITICAL: Persist status to plan file(s) to prevent flip-flop on task list refresh
// When getTasks() is called, it reads status from the plan file. Without persisting,
// the status in the file might differ from the UI, causing inconsistent state.
// Uses shared utility with locking to prevent race conditions.
- try {
- const projects = projectStore.getProjects();
- for (const p of projects) {
- const tasks = projectStore.getTasks(p.id);
- const task = tasks.find((t) => t.id === taskId || t.specId === taskId);
- if (task) {
- const planPath = getPlanPath(p, task);
- persistPlanStatusSync(planPath, newStatus);
- break;
+ // IMPORTANT: We persist to BOTH main project AND worktree (if exists) to ensure
+ // consistency, since getTasks() prefers the worktree version.
+ if (task && project) {
+ try {
+ // Persist to main project plan file
+ const mainPlanPath = getPlanPath(project, task);
+ persistPlanStatusSync(mainPlanPath, newStatus, project.id);
+
+ // Also persist to worktree plan file if it exists
+ // This ensures consistency since getTasks() prefers worktree version
+ const worktreePath = findTaskWorktree(project.path, task.specId);
+ if (worktreePath) {
+ const specsBaseDir = getSpecsDir(project.autoBuildPath);
+ const worktreePlanPath = path.join(
+ worktreePath,
+ specsBaseDir,
+ task.specId,
+ AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN
+ );
+ if (existsSync(worktreePlanPath)) {
+ persistPlanStatusSync(worktreePlanPath, newStatus, project.id);
+ }
}
+ } catch (err) {
+ // Ignore persistence errors - UI will still work, just might flip on refresh
+ console.warn('[execution-progress] Could not persist status:', err);
}
- } catch (err) {
- // Ignore persistence errors - UI will still work, just might flip on refresh
- console.warn('[execution-progress] Could not persist status:', err);
}
}
}
@@ -190,14 +314,18 @@ export function registerAgenteventsHandlers(
fileWatcher.on('progress', (taskId: string, plan: ImplementationPlan) => {
const mainWindow = getMainWindow();
if (mainWindow) {
- mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, plan);
+ // Use shared helper to find project (issue #723 - deduplicate lookup)
+ const { project } = findTaskAndProject(taskId);
+ mainWindow.webContents.send(IPC_CHANNELS.TASK_PROGRESS, taskId, plan, project?.id);
}
});
fileWatcher.on('error', (taskId: string, error: string) => {
const mainWindow = getMainWindow();
if (mainWindow) {
- mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error);
+ // Include projectId for multi-project filtering (issue #723)
+ const { project } = findTaskAndProject(taskId);
+ mainWindow.webContents.send(IPC_CHANNELS.TASK_ERROR, taskId, error, project?.id);
}
});
}
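
Because these events now carry a trailing projectId, renderer listeners can ignore updates that belong to other projects. A minimal sketch of that consumer side, assuming a preload bridge exposed as window.electronAPI.on and a literal channel string in place of the IPC_CHANNELS constant (both assumptions, not part of this diff):

type TaskStatus = string; // stand-in for the shared TaskStatus union

declare global {
  interface Window {
    electronAPI: {
      // Returns an unsubscribe function (assumed preload contract).
      on(channel: string, listener: (...args: unknown[]) => void): () => void;
    };
  }
}

export function subscribeToTaskStatus(
  currentProjectId: string,
  onChange: (taskId: string, status: TaskStatus) => void
): () => void {
  return window.electronAPI.on('task-status-change', (...args: unknown[]) => {
    const [taskId, status, projectId] = args as [string, TaskStatus, string | undefined];
    // projectId is the new trailing argument; drop events from another project.
    if (projectId && projectId !== currentProjectId) return;
    onChange(taskId, status);
  });
}
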
diff --git a/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts b/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts
index 1d0b963efc..66c7f3ee3d 100644
--- a/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/app-update-handlers.ts
@@ -11,6 +11,7 @@ import type { IPCResult, AppUpdateInfo } from '../../shared/types';
import {
checkForUpdates,
downloadUpdate,
+ downloadStableVersion,
quitAndInstall,
getCurrentVersion
} from '../app-updater';
@@ -65,6 +66,26 @@ export function registerAppUpdateHandlers(): void {
}
);
+ /**
+ * APP_UPDATE_DOWNLOAD_STABLE: Download stable version (for downgrade from beta)
+ * Uses allowDowngrade to download an older stable version
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.APP_UPDATE_DOWNLOAD_STABLE,
+ async (): Promise<IPCResult> => {
+ try {
+ await downloadStableVersion();
+ return { success: true };
+ } catch (error) {
+ console.error('[app-update-handlers] Download stable version failed:', error);
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to download stable version'
+ };
+ }
+ }
+ );
+
/**
* APP_UPDATE_INSTALL: Quit and install update
* Quits the app and installs the downloaded update
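
On the renderer side the new channel would be invoked like any other update handler. This is a sketch only: the literal channel string stands in for IPC_CHANNELS.APP_UPDATE_DOWNLOAD_STABLE, and the local IPCResult interface mirrors the shared type rather than importing it.

import { ipcRenderer } from 'electron';

interface IPCResult<T = void> {
  success: boolean;
  data?: T;
  error?: string;
}

export async function downgradeToStable(): Promise<void> {
  const result = (await ipcRenderer.invoke('app-update:download-stable')) as IPCResult;
  if (!result.success) {
    throw new Error(result.error ?? 'Failed to download stable version');
  }
  // Once the stable build is downloaded, the existing APP_UPDATE_INSTALL channel
  // can be invoked to quit and install it.
}
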
diff --git a/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts b/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts
deleted file mode 100644
index 4a4ab66d82..0000000000
--- a/apps/frontend/src/main/ipc-handlers/autobuild-source-handlers.ts
+++ /dev/null
@@ -1,321 +0,0 @@
-import { ipcMain } from 'electron';
-import type { BrowserWindow } from 'electron';
-import { IPC_CHANNELS } from '../../shared/constants';
-import type { IPCResult } from '../../shared/types';
-import path from 'path';
-import { existsSync, readFileSync, writeFileSync } from 'fs';
-import type { AutoBuildSourceUpdateProgress, SourceEnvConfig, SourceEnvCheckResult } from '../../shared/types';
-import { checkForUpdates as checkSourceUpdates, downloadAndApplyUpdate, getBundledVersion, getEffectiveVersion, getEffectiveSourcePath } from '../auto-claude-updater';
-import { debugLog } from '../../shared/utils/debug-logger';
-
-
-/**
- * Register all autobuild-source-related IPC handlers
- */
-export function registerAutobuildSourceHandlers(
- getMainWindow: () => BrowserWindow | null
-): void {
- // ============================================
- // Auto Claude Source Update Operations
- // ============================================
-
- ipcMain.handle(
- IPC_CHANNELS.AUTOBUILD_SOURCE_CHECK,
- async (): Promise> => {
- console.log('[autobuild-source] Check for updates called');
- debugLog('[IPC] AUTOBUILD_SOURCE_CHECK called');
- try {
- const result = await checkSourceUpdates();
- console.log('[autobuild-source] Check result:', JSON.stringify(result, null, 2));
- debugLog('[IPC] AUTOBUILD_SOURCE_CHECK result:', result);
- return { success: true, data: result };
- } catch (error) {
- console.error('[autobuild-source] Check error:', error);
- debugLog('[IPC] AUTOBUILD_SOURCE_CHECK error:', error);
- return {
- success: false,
- error: error instanceof Error ? error.message : 'Failed to check for updates'
- };
- }
- }
- );
-
- ipcMain.on(
- IPC_CHANNELS.AUTOBUILD_SOURCE_DOWNLOAD,
- () => {
- debugLog('[IPC] Autobuild source download requested');
- const mainWindow = getMainWindow();
- if (!mainWindow) {
- debugLog('[IPC] No main window available, aborting update');
- return;
- }
-
- // Start download in background
- downloadAndApplyUpdate((progress) => {
- debugLog('[IPC] Update progress:', progress.stage, progress.message);
- mainWindow.webContents.send(
- IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS,
- progress
- );
- }).then((result) => {
- if (result.success) {
- debugLog('[IPC] Update completed successfully, version:', result.version);
- mainWindow.webContents.send(
- IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS,
- {
- stage: 'complete',
- message: `Updated to version ${result.version}`,
- newVersion: result.version // Include new version for UI refresh
- } as AutoBuildSourceUpdateProgress
- );
- } else {
- debugLog('[IPC] Update failed:', result.error);
- mainWindow.webContents.send(
- IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS,
- {
- stage: 'error',
- message: result.error || 'Update failed'
- } as AutoBuildSourceUpdateProgress
- );
- }
- }).catch((error) => {
- debugLog('[IPC] Update error:', error instanceof Error ? error.message : error);
- mainWindow.webContents.send(
- IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS,
- {
- stage: 'error',
- message: error instanceof Error ? error.message : 'Update failed'
- } as AutoBuildSourceUpdateProgress
- );
- });
-
- // Send initial progress
- mainWindow.webContents.send(
- IPC_CHANNELS.AUTOBUILD_SOURCE_PROGRESS,
- {
- stage: 'checking',
- message: 'Starting update...'
- } as AutoBuildSourceUpdateProgress
- );
- }
- );
-
- ipcMain.handle(
- IPC_CHANNELS.AUTOBUILD_SOURCE_VERSION,
- async (): Promise<IPCResult<string>> => {
- try {
- // Use effective version which accounts for source updates
- const version = getEffectiveVersion();
- debugLog('[IPC] Returning effective version:', version);
- return { success: true, data: version };
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : 'Failed to get version'
- };
- }
- }
- );
-
- // ============================================
- // Auto Claude Source Environment Operations
- // ============================================
-
- /**
- * Parse an .env file content into a key-value object
- */
- const parseSourceEnvFile = (content: string): Record<string, string> => {
- const vars: Record<string, string> = {};
- for (const line of content.split('\n')) {
- const trimmed = line.trim();
- if (!trimmed || trimmed.startsWith('#')) continue;
-
- const eqIndex = trimmed.indexOf('=');
- if (eqIndex > 0) {
- const key = trimmed.substring(0, eqIndex).trim();
- let value = trimmed.substring(eqIndex + 1).trim();
- // Remove quotes if present
- if ((value.startsWith('"') && value.endsWith('"')) ||
- (value.startsWith("'") && value.endsWith("'"))) {
- value = value.slice(1, -1);
- }
- vars[key] = value;
- }
- }
- return vars;
- };
-
- ipcMain.handle(
- IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_GET,
- async (): Promise<IPCResult<SourceEnvConfig>> => {
- try {
- const sourcePath = getEffectiveSourcePath();
- if (!sourcePath) {
- return {
- success: true,
- data: {
- hasClaudeToken: false,
- envExists: false,
- sourcePath: undefined
- }
- };
- }
-
- const envPath = path.join(sourcePath, '.env');
- const envExists = existsSync(envPath);
-
- if (!envExists) {
- return {
- success: true,
- data: {
- hasClaudeToken: false,
- envExists: false,
- sourcePath
- }
- };
- }
-
- const content = readFileSync(envPath, 'utf-8');
- const vars = parseSourceEnvFile(content);
- const hasToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN'];
-
- return {
- success: true,
- data: {
- hasClaudeToken: hasToken,
- claudeOAuthToken: hasToken ? vars['CLAUDE_CODE_OAUTH_TOKEN'] : undefined,
- envExists: true,
- sourcePath
- }
- };
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : 'Failed to get source env'
- };
- }
- }
- );
-
- ipcMain.handle(
- IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE,
- async (_, config: { claudeOAuthToken?: string }): Promise<IPCResult> => {
- try {
- const sourcePath = getEffectiveSourcePath();
- if (!sourcePath) {
- return {
- success: false,
- error: 'Auto-Claude source path not found. Please configure it in App Settings.'
- };
- }
-
- const envPath = path.join(sourcePath, '.env');
-
- // Read existing content or start fresh
- let existingContent = '';
- const existingVars: Record<string, string> = {};
-
- if (existsSync(envPath)) {
- existingContent = readFileSync(envPath, 'utf-8');
- Object.assign(existingVars, parseSourceEnvFile(existingContent));
- }
-
- // Update the token
- if (config.claudeOAuthToken !== undefined) {
- existingVars['CLAUDE_CODE_OAUTH_TOKEN'] = config.claudeOAuthToken;
- }
-
- // Rebuild the .env file preserving comments and structure
- const lines = existingContent.split('\n');
- const processedKeys = new Set();
- const outputLines: string[] = [];
-
- for (const line of lines) {
- const trimmed = line.trim();
- if (!trimmed || trimmed.startsWith('#')) {
- outputLines.push(line);
- continue;
- }
-
- const eqIndex = trimmed.indexOf('=');
- if (eqIndex > 0) {
- const key = trimmed.substring(0, eqIndex).trim();
- if (key in existingVars) {
- outputLines.push(`${key}=${existingVars[key]}`);
- processedKeys.add(key);
- } else {
- outputLines.push(line);
- }
- } else {
- outputLines.push(line);
- }
- }
-
- // Add any new keys that weren't in the original file
- for (const [key, value] of Object.entries(existingVars)) {
- if (!processedKeys.has(key)) {
- outputLines.push(`${key}=${value}`);
- }
- }
-
- writeFileSync(envPath, outputLines.join('\n'));
-
- return { success: true };
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : 'Failed to update source env'
- };
- }
- }
- );
-
- ipcMain.handle(
- IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN,
- async (): Promise<IPCResult<SourceEnvCheckResult>> => {
- try {
- const sourcePath = getEffectiveSourcePath();
- if (!sourcePath) {
- return {
- success: true,
- data: {
- hasToken: false,
- sourcePath: undefined,
- error: 'Auto-Claude source path not found'
- }
- };
- }
-
- const envPath = path.join(sourcePath, '.env');
- if (!existsSync(envPath)) {
- return {
- success: true,
- data: {
- hasToken: false,
- sourcePath,
- error: '.env file does not exist'
- }
- };
- }
-
- const content = readFileSync(envPath, 'utf-8');
- const vars = parseSourceEnvFile(content);
- const hasToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN'] && vars['CLAUDE_CODE_OAUTH_TOKEN'].length > 0;
-
- return {
- success: true,
- data: {
- hasToken,
- sourcePath
- }
- };
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : 'Failed to check source token'
- };
- }
- }
- );
-
-}
diff --git a/apps/frontend/src/main/ipc-handlers/context/utils.ts b/apps/frontend/src/main/ipc-handlers/context/utils.ts
index c815751778..6611e99740 100644
--- a/apps/frontend/src/main/ipc-handlers/context/utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/context/utils.ts
@@ -131,7 +131,7 @@ export interface EmbeddingValidationResult {
/**
* Validate embedding configuration based on the configured provider
* Supports: openai, ollama, google, voyage, azure_openai
- *
+ *
* @returns validation result with provider info and reason if invalid
*/
export function validateEmbeddingConfiguration(
diff --git a/apps/frontend/src/main/ipc-handlers/env-handlers.ts b/apps/frontend/src/main/ipc-handlers/env-handlers.ts
index 9574215b9e..99ab0790c4 100644
--- a/apps/frontend/src/main/ipc-handlers/env-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/env-handlers.ts
@@ -8,6 +8,8 @@ import { existsSync, readFileSync, writeFileSync } from 'fs';
import { spawn } from 'child_process';
import { projectStore } from '../project-store';
import { parseEnvFile } from './utils';
+import { getClaudeCliInvocation, getClaudeCliInvocationAsync } from '../claude-cli-utils';
+import { debugError } from '../../shared/utils/debug-logger';
// GitLab environment variable keys
const GITLAB_ENV_KEYS = {
@@ -25,6 +27,43 @@ function envLine(vars: Record<string, string>, key: string, defaultVal: string =
return vars[key] ? `${key}=${vars[key]}` : `# ${key}=${defaultVal}`;
}
+type ResolvedClaudeCliInvocation =
+ | { command: string; env: Record<string, string> }
+ | { error: string };
+
+function resolveClaudeCliInvocation(): ResolvedClaudeCliInvocation {
+ try {
+ const invocation = getClaudeCliInvocation();
+ if (!invocation?.command) {
+ throw new Error('Claude CLI path not resolved');
+ }
+ return { command: invocation.command, env: invocation.env };
+ } catch (error) {
+ debugError('[IPC] Failed to resolve Claude CLI path:', error);
+ return {
+ error: error instanceof Error ? error.message : 'Failed to resolve Claude CLI path',
+ };
+ }
+}
+
+/**
+ * Async version of resolveClaudeCliInvocation - non-blocking for main process
+ */
+async function resolveClaudeCliInvocationAsync(): Promise<ResolvedClaudeCliInvocation> {
+ try {
+ const invocation = await getClaudeCliInvocationAsync();
+ if (!invocation?.command) {
+ throw new Error('Claude CLI path not resolved');
+ }
+ return { command: invocation.command, env: invocation.env };
+ } catch (error) {
+ debugError('[IPC] Failed to resolve Claude CLI path:', error);
+ return {
+ error: error instanceof Error ? error.message : 'Failed to resolve Claude CLI path',
+ };
+ }
+}
+
/**
* Register all env-related IPC handlers
@@ -552,13 +591,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
return { success: false, error: 'Project not found' };
}
+ // Use async version to avoid blocking main process during CLI detection
+ const resolved = await resolveClaudeCliInvocationAsync();
+ if ('error' in resolved) {
+ return { success: false, error: resolved.error };
+ }
+ const claudeCmd = resolved.command;
+ const claudeEnv = resolved.env;
+
try {
// Check if Claude CLI is available and authenticated
const result = await new Promise((resolve) => {
- const proc = spawn('claude', ['--version'], {
+ const proc = spawn(claudeCmd, ['--version'], {
cwd: project.path,
- env: { ...process.env },
- shell: true
+ env: claudeEnv,
+ shell: false
});
let _stdout = '';
@@ -576,10 +623,10 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
if (code === 0) {
// Claude CLI is available, check if authenticated
// Run a simple command that requires auth
- const authCheck = spawn('claude', ['api', '--help'], {
+ const authCheck = spawn(claudeCmd, ['api', '--help'], {
cwd: project.path,
- env: { ...process.env },
- shell: true
+ env: claudeEnv,
+ shell: false
});
authCheck.on('close', (authCode: number | null) => {
@@ -614,6 +661,9 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
});
});
+ if (!result.success) {
+ return { success: false, error: result.error || 'Failed to check Claude auth' };
+ }
return { success: true, data: result };
} catch (error) {
return {
@@ -632,13 +682,21 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
return { success: false, error: 'Project not found' };
}
+ // Use async version to avoid blocking main process during CLI detection
+ const resolved = await resolveClaudeCliInvocationAsync();
+ if ('error' in resolved) {
+ return { success: false, error: resolved.error };
+ }
+ const claudeCmd = resolved.command;
+ const claudeEnv = resolved.env;
+
try {
// Run claude setup-token which will open browser for OAuth
const result = await new Promise((resolve) => {
- const proc = spawn('claude', ['setup-token'], {
+ const proc = spawn(claudeCmd, ['setup-token'], {
cwd: project.path,
- env: { ...process.env },
- shell: true,
+ env: claudeEnv,
+ shell: false,
stdio: 'inherit' // This allows the terminal to handle the interactive auth
});
@@ -666,6 +724,9 @@ ${existingVars['GRAPHITI_DB_PATH'] ? `GRAPHITI_DB_PATH=${existingVars['GRAPHITI_
});
});
+ if (!result.success) {
+ return { success: false, error: result.error || 'Failed to invoke Claude setup' };
+ }
return { success: true, data: result };
} catch (error) {
return {
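
The switch from spawn('claude', …, { shell: true }) to a pre-resolved absolute command with shell: false matters in GUI-launched Electron apps, where PATH is often minimal, and it removes shell-quoting concerns. A standalone sketch of the pattern (the example path and env are illustrative, not values produced by getClaudeCliInvocationAsync):

import { spawn } from 'child_process';

function checkCliVersion(command: string, env: NodeJS.ProcessEnv, cwd: string): Promise<number | null> {
  return new Promise((resolve) => {
    // shell:false executes the binary directly; argv entries are never re-parsed by a shell.
    const proc = spawn(command, ['--version'], { cwd, env, shell: false });
    proc.on('error', () => resolve(null)); // e.g. ENOENT when the resolved path is wrong
    proc.on('close', (code) => resolve(code));
  });
}

// Illustrative usage:
// const exitCode = await checkCliVersion('/usr/local/bin/claude', process.env, projectPath);
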
diff --git a/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts b/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts
index 616106675d..4c3c942f7e 100644
--- a/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/__tests__/oauth-handlers.spec.ts
@@ -10,11 +10,15 @@ const mockSpawn = vi.fn();
const mockExecSync = vi.fn();
const mockExecFileSync = vi.fn();
-vi.mock('child_process', () => ({
- spawn: (...args: unknown[]) => mockSpawn(...args),
- execSync: (...args: unknown[]) => mockExecSync(...args),
- execFileSync: (...args: unknown[]) => mockExecFileSync(...args)
-}));
+vi.mock('child_process', async (importOriginal) => {
+ const actual = await importOriginal();
+ return {
+ ...actual,
+ spawn: (...args: unknown[]) => mockSpawn(...args),
+ execSync: (...args: unknown[]) => mockExecSync(...args),
+ execFileSync: (...args: unknown[]) => mockExecFileSync(...args)
+ };
+});
// Mock shell.openExternal
const mockOpenExternal = vi.fn();
@@ -82,6 +86,13 @@ vi.mock('../../../env-utils', () => ({
isCommandAvailable: vi.fn((cmd: string) => mockFindExecutable(cmd) !== null)
}));
+// Mock cli-tool-manager to avoid child_process import issues
+vi.mock('../../../cli-tool-manager', () => ({
+ getToolPath: vi.fn(() => '/usr/local/bin/gh'),
+ detectCLITools: vi.fn(),
+ getAllToolStatus: vi.fn()
+}));
+
// Create mock process for spawn
function createMockProcess(): EventEmitter & {
stdout: EventEmitter | null;
diff --git a/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts b/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts
new file mode 100644
index 0000000000..751578da7f
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/__tests__/runner-env-handlers.test.ts
@@ -0,0 +1,260 @@
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import fs from 'fs';
+import os from 'os';
+import path from 'path';
+import type { Project } from '../../../../shared/types';
+import { IPC_CHANNELS } from '../../../../shared/constants';
+import type { BrowserWindow } from 'electron';
+import type { AgentManager } from '../../../agent/agent-manager';
+import type { createIPCCommunicators as createIPCCommunicatorsType } from '../utils/ipc-communicator';
+
+const mockIpcMain = vi.hoisted(() => {
+ class HoistedMockIpcMain {
+ handlers = new Map();
+ listeners = new Map();
+
+ handle(channel: string, handler: Function): void {
+ this.handlers.set(channel, handler);
+ }
+
+ on(channel: string, listener: Function): void {
+ this.listeners.set(channel, listener);
+ }
+
+ async invokeHandler(channel: string, ...args: unknown[]): Promise<unknown> {
+ const handler = this.handlers.get(channel);
+ if (!handler) {
+ throw new Error(`No handler for channel: ${channel}`);
+ }
+ return handler({}, ...args);
+ }
+
+ async emit(channel: string, ...args: unknown[]): Promise<void> {
+ const listener = this.listeners.get(channel);
+ if (!listener) {
+ throw new Error(`No listener for channel: ${channel}`);
+ }
+ await listener({}, ...args);
+ }
+
+ reset(): void {
+ this.handlers.clear();
+ this.listeners.clear();
+ }
+ }
+
+ return new HoistedMockIpcMain();
+});
+
+const mockRunPythonSubprocess = vi.fn();
+const mockValidateGitHubModule = vi.fn();
+const mockGetRunnerEnv = vi.fn();
+type CreateIPCCommunicators = typeof createIPCCommunicatorsType;
+
+const mockCreateIPCCommunicators = vi.fn(
+ (..._args: Parameters<CreateIPCCommunicators>) => ({
+ sendProgress: vi.fn(),
+ sendComplete: vi.fn(),
+ sendError: vi.fn(),
+ })
+) as unknown as CreateIPCCommunicators;
+
+const projectRef: { current: Project | null } = { current: null };
+const tempDirs: string[] = [];
+
+vi.mock('electron', () => ({
+ ipcMain: mockIpcMain,
+ BrowserWindow: class {},
+ app: {
+ getPath: vi.fn(() => '/tmp'),
+ on: vi.fn(),
+ },
+}));
+
+vi.mock('../../../agent/agent-manager', () => ({
+ AgentManager: class {
+ startSpecCreation = vi.fn();
+ },
+}));
+
+vi.mock('../utils/ipc-communicator', () => ({
+ createIPCCommunicators: (...args: Parameters<CreateIPCCommunicators>) =>
+ mockCreateIPCCommunicators(...args),
+}));
+
+vi.mock('../utils/project-middleware', () => ({
+ withProjectOrNull: async (_projectId: string, handler: (project: Project) => Promise<unknown>) => {
+ if (!projectRef.current) {
+ return null;
+ }
+ return handler(projectRef.current);
+ },
+}));
+
+vi.mock('../utils/subprocess-runner', () => ({
+ runPythonSubprocess: (...args: unknown[]) => mockRunPythonSubprocess(...args),
+ validateGitHubModule: (...args: unknown[]) => mockValidateGitHubModule(...args),
+ getPythonPath: () => '/tmp/python',
+ getRunnerPath: () => '/tmp/runner.py',
+ buildRunnerArgs: (_runnerPath: string, _projectPath: string, command: string, args: string[] = []) => [
+ 'runner.py',
+ command,
+ ...args,
+ ],
+}));
+
+vi.mock('../utils/runner-env', () => ({
+ getRunnerEnv: (...args: unknown[]) => mockGetRunnerEnv(...args),
+}));
+
+vi.mock('../utils', () => ({
+ getGitHubConfig: vi.fn(() => null),
+ githubFetch: vi.fn(),
+}));
+
+vi.mock('../../../settings-utils', () => ({
+ readSettingsFile: vi.fn(() => ({})),
+}));
+
+function createMockWindow(): BrowserWindow {
+ return { webContents: { send: vi.fn() } } as unknown as BrowserWindow;
+}
+
+function createProject(): Project {
+ const projectPath = fs.mkdtempSync(path.join(os.tmpdir(), 'github-env-test-'));
+ tempDirs.push(projectPath);
+ return {
+ id: 'project-1',
+ name: 'Test Project',
+ path: projectPath,
+ autoBuildPath: '.auto-claude',
+ settings: {
+ model: 'default',
+ memoryBackend: 'file',
+ linearSync: false,
+ notifications: {
+ onTaskComplete: false,
+ onTaskFailed: false,
+ onReviewNeeded: false,
+ sound: false,
+ },
+ graphitiMcpEnabled: false,
+ useClaudeMd: true,
+ },
+ createdAt: new Date(),
+ updatedAt: new Date(),
+ };
+}
+
+describe('GitHub runner env usage', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ mockIpcMain.reset();
+ projectRef.current = createProject();
+ mockValidateGitHubModule.mockResolvedValue({ valid: true, backendPath: '/tmp/backend' });
+ mockGetRunnerEnv.mockResolvedValue({ ANTHROPIC_AUTH_TOKEN: 'token' });
+ });
+
+ afterEach(() => {
+ for (const dir of tempDirs) {
+ try {
+ fs.rmSync(dir, { recursive: true, force: true });
+ } catch {
+ // Ignore cleanup errors for already-removed temp dirs.
+ }
+ }
+ tempDirs.length = 0;
+ });
+
+ it('passes runner env to PR review subprocess', async () => {
+ const { registerPRHandlers } = await import('../pr-handlers');
+
+ mockRunPythonSubprocess.mockReturnValue({
+ process: { pid: 123 },
+ promise: Promise.resolve({
+ success: true,
+ exitCode: 0,
+ stdout: '',
+ stderr: '',
+ data: {
+ prNumber: 123,
+ repo: 'test/repo',
+ success: true,
+ findings: [],
+ summary: '',
+ overallStatus: 'comment',
+ reviewedAt: new Date().toISOString(),
+ },
+ }),
+ });
+
+ registerPRHandlers(() => createMockWindow());
+ await mockIpcMain.emit(IPC_CHANNELS.GITHUB_PR_REVIEW, projectRef.current?.id, 123);
+
+ expect(mockGetRunnerEnv).toHaveBeenCalledWith({ USE_CLAUDE_MD: 'true' });
+ expect(mockRunPythonSubprocess).toHaveBeenCalledWith(
+ expect.objectContaining({
+ env: { ANTHROPIC_AUTH_TOKEN: 'token' },
+ })
+ );
+ });
+
+ it('passes runner env to triage subprocess', async () => {
+ const { registerTriageHandlers } = await import('../triage-handlers');
+
+ mockRunPythonSubprocess.mockReturnValue({
+ process: { pid: 124 },
+ promise: Promise.resolve({
+ success: true,
+ exitCode: 0,
+ stdout: '',
+ stderr: '',
+ data: [],
+ }),
+ });
+
+ registerTriageHandlers(() => createMockWindow());
+ await mockIpcMain.emit(IPC_CHANNELS.GITHUB_TRIAGE_RUN, projectRef.current?.id);
+
+ expect(mockGetRunnerEnv).toHaveBeenCalledWith();
+ expect(mockRunPythonSubprocess).toHaveBeenCalledWith(
+ expect.objectContaining({
+ env: { ANTHROPIC_AUTH_TOKEN: 'token' },
+ })
+ );
+ });
+
+ it('passes runner env to autofix analyze preview subprocess', async () => {
+ const { registerAutoFixHandlers } = await import('../autofix-handlers');
+ const { AgentManager: MockedAgentManager } = await import('../../../agent/agent-manager');
+
+ mockRunPythonSubprocess.mockReturnValue({
+ process: { pid: 125 },
+ promise: Promise.resolve({
+ success: true,
+ exitCode: 0,
+ stdout: '',
+ stderr: '',
+ data: {
+ totalIssues: 0,
+ primaryIssue: null,
+ proposedBatches: [],
+ singleIssues: [],
+ },
+ }),
+ });
+
+ const agentManager: AgentManager = new MockedAgentManager();
+ const getMainWindow: () => BrowserWindow | null = () => createMockWindow();
+
+ registerAutoFixHandlers(agentManager, getMainWindow);
+ await mockIpcMain.emit(IPC_CHANNELS.GITHUB_AUTOFIX_ANALYZE_PREVIEW, projectRef.current?.id);
+
+ expect(mockGetRunnerEnv).toHaveBeenCalledWith();
+ expect(mockRunPythonSubprocess).toHaveBeenCalledWith(
+ expect.objectContaining({
+ env: { ANTHROPIC_AUTH_TOKEN: 'token' },
+ })
+ );
+ });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts
index 578ebace52..187eaa5d6b 100644
--- a/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/autofix-handlers.ts
@@ -28,6 +28,7 @@ import {
parseJSONFromOutput,
} from './utils/subprocess-runner';
import { AgentManager } from '../../agent/agent-manager';
+import { getRunnerEnv } from './utils/runner-env';
// Debug logging
const { debug: debugLog } = createContextLogger('GitHub AutoFix');
@@ -277,11 +278,13 @@ async function checkNewIssues(project: Project): Promise
const backendPath = validation.backendPath!;
const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'check-new');
+ const subprocessEnv = await getRunnerEnv();
const { promise } = runPythonSubprocess<Record<string, unknown>>({
pythonPath: getPythonPath(backendPath),
args,
cwd: backendPath,
+ env: subprocessEnv,
onComplete: (stdout) => {
return parseJSONFromOutput<Record<string, unknown>>(stdout);
},
@@ -361,7 +364,15 @@ async function startAutoFix(
// Create spec
const taskDescription = buildInvestigationTask(issue.number, issue.title, issueContext);
- const specData = await createSpecForIssue(project, issue.number, issue.title, taskDescription, issue.html_url, labels);
+ const specData = await createSpecForIssue(
+ project,
+ issue.number,
+ issue.title,
+ taskDescription,
+ issue.html_url,
+ labels,
+ project.settings?.mainBranch // Pass project's configured main branch
+ );
// Save auto-fix state
const issuesDir = path.join(getGitHubDir(project), 'issues');
@@ -607,6 +618,7 @@ export function registerAutoFixHandlers(
const backendPath = validation.backendPath!;
const additionalArgs = issueNumbers && issueNumbers.length > 0 ? issueNumbers.map(n => n.toString()) : [];
const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'batch-issues', additionalArgs);
+ const subprocessEnv = await getRunnerEnv();
debugLog('Spawning batch process', { args });
@@ -614,6 +626,7 @@ export function registerAutoFixHandlers(
pythonPath: getPythonPath(backendPath),
args,
cwd: backendPath,
+ env: subprocessEnv,
onProgress: (percent, message) => {
sendProgress({
phase: 'batching',
@@ -728,12 +741,14 @@ export function registerAutoFixHandlers(
}
const args = buildRunnerArgs(getRunnerPath(backendPath), project.path, 'analyze-preview', additionalArgs);
+ const subprocessEnv = await getRunnerEnv();
debugLog('Spawning analyze-preview process', { args });
const { promise } = runPythonSubprocess({
pythonPath: getPythonPath(backendPath),
args,
cwd: backendPath,
+ env: subprocessEnv,
onProgress: (percent, message) => {
sendProgress({ phase: 'analyzing', progress: percent, message });
},
diff --git a/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts
index 8a38619e79..9e2e5c0506 100644
--- a/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/import-handlers.ts
@@ -66,7 +66,8 @@ ${issue.body || 'No description provided.'}
issue.title,
description,
issue.html_url,
- labelNames
+ labelNames,
+ project.settings?.mainBranch // Pass project's configured main branch
);
// Start spec creation with the existing spec directory
diff --git a/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts
index 4f5a36d435..7ddae6e599 100644
--- a/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/investigation-handlers.ts
@@ -148,7 +148,8 @@ export function registerInvestigateIssue(
issue.title,
taskDescription,
issue.html_url,
- labels
+ labels,
+ project.settings?.mainBranch // Pass project's configured main branch
);
// NOTE: We intentionally do NOT call agentManager.startSpecCreation() here
diff --git a/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts
index 7f6b01f44a..d1dacecf0f 100644
--- a/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/pr-handlers.ts
@@ -8,25 +8,32 @@
* 4. Apply fixes
*/
-import { ipcMain } from 'electron';
-import type { BrowserWindow } from 'electron';
-import path from 'path';
-import fs from 'fs';
-import { IPC_CHANNELS, MODEL_ID_MAP, DEFAULT_FEATURE_MODELS, DEFAULT_FEATURE_THINKING } from '../../../shared/constants';
-import { getGitHubConfig, githubFetch } from './utils';
-import { readSettingsFile } from '../../settings-utils';
-import { getAugmentedEnv } from '../../env-utils';
-import type { Project, AppSettings } from '../../../shared/types';
-import { createContextLogger } from './utils/logger';
-import { withProjectOrNull } from './utils/project-middleware';
-import { createIPCCommunicators } from './utils/ipc-communicator';
+import { ipcMain } from "electron";
+import type { BrowserWindow } from "electron";
+import path from "path";
+import fs from "fs";
+import {
+ IPC_CHANNELS,
+ MODEL_ID_MAP,
+ DEFAULT_FEATURE_MODELS,
+ DEFAULT_FEATURE_THINKING,
+} from "../../../shared/constants";
+import { getGitHubConfig, githubFetch } from "./utils";
+import { readSettingsFile } from "../../settings-utils";
+import { getAugmentedEnv } from "../../env-utils";
+import { getMemoryService, getDefaultDbPath } from "../../memory-service";
+import type { Project, AppSettings } from "../../../shared/types";
+import { createContextLogger } from "./utils/logger";
+import { withProjectOrNull } from "./utils/project-middleware";
+import { createIPCCommunicators } from "./utils/ipc-communicator";
+import { getRunnerEnv } from "./utils/runner-env";
import {
runPythonSubprocess,
getPythonPath,
getRunnerPath,
validateGitHubModule,
buildRunnerArgs,
-} from './utils/subprocess-runner';
+} from "./utils/subprocess-runner";
/**
* Sanitize network data before writing to file
@@ -36,15 +43,34 @@ function sanitizeNetworkData(data: string, maxLength = 1000000): string {
// Remove null bytes and other control characters except newlines/tabs/carriage returns
// Using code points instead of escape sequences to avoid no-control-regex ESLint rule
const controlCharsPattern = new RegExp(
- '[' +
- String.fromCharCode(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08) + // \x00-\x08
- String.fromCharCode(0x0B, 0x0C) + // \x0B, \x0C (skip \x0A which is newline)
- String.fromCharCode(0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F) + // \x0E-\x1F
- String.fromCharCode(0x7F) + // \x7F (DEL)
- ']',
- 'g'
+ "[" +
+ String.fromCharCode(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08) + // \x00-\x08
+ String.fromCharCode(0x0b, 0x0c) + // \x0B, \x0C (skip \x0A which is newline)
+ String.fromCharCode(
+ 0x0e,
+ 0x0f,
+ 0x10,
+ 0x11,
+ 0x12,
+ 0x13,
+ 0x14,
+ 0x15,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f
+ ) + // \x0E-\x1F
+ String.fromCharCode(0x7f) + // \x7F (DEL)
+ "]",
+ "g"
);
- let sanitized = data.replace(controlCharsPattern, '');
+ let sanitized = data.replace(controlCharsPattern, "");
// Limit length to prevent DoS
if (sanitized.length > maxLength) {
@@ -55,13 +81,13 @@ function sanitizeNetworkData(data: string, maxLength = 1000000): string {
}
// Debug logging
-const { debug: debugLog } = createContextLogger('GitHub PR');
+const { debug: debugLog } = createContextLogger("GitHub PR");
/**
* Registry of running PR review processes
* Key format: `${projectId}:${prNumber}`
*/
-const runningReviews = new Map();
+const runningReviews = new Map();
/**
* Get the registry key for a PR review
@@ -70,13 +96,20 @@ function getReviewKey(projectId: string, prNumber: number): string {
return `${projectId}:${prNumber}`;
}
+/**
+ * Returns env vars for Claude.md usage; enabled unless explicitly opted out.
+ */
+function getClaudeMdEnv(project: Project): Record<string, string> | undefined {
+ return project.settings?.useClaudeMd !== false ? { USE_CLAUDE_MD: "true" } : undefined;
+}
+
/**
* PR review finding from AI analysis
*/
export interface PRReviewFinding {
id: string;
- severity: 'critical' | 'high' | 'medium' | 'low';
- category: 'security' | 'quality' | 'style' | 'test' | 'docs' | 'pattern' | 'performance';
+ severity: "critical" | "high" | "medium" | "low";
+ category: "security" | "quality" | "style" | "test" | "docs" | "pattern" | "performance";
title: string;
description: string;
file: string;
@@ -95,12 +128,13 @@ export interface PRReviewResult {
success: boolean;
findings: PRReviewFinding[];
summary: string;
- overallStatus: 'approve' | 'request_changes' | 'comment';
+ overallStatus: "approve" | "request_changes" | "comment";
reviewId?: number;
reviewedAt: string;
error?: string;
// Follow-up review fields
reviewedCommitSha?: string;
+ reviewedFileBlobs?: Record<string, string>; // filename → blob SHA for rebase-resistant follow-ups
isFollowupReview?: boolean;
previousReviewId?: number;
resolvedFindings?: string[];
@@ -124,6 +158,181 @@ export interface NewCommitsCheck {
hasCommitsAfterPosting?: boolean;
}
+/**
+ * Lightweight merge readiness check result
+ * Used for real-time validation of AI verdict freshness
+ */
+export interface MergeReadiness {
+ /** PR is in draft mode */
+ isDraft: boolean;
+ /** GitHub's mergeable status */
+ mergeable: "MERGEABLE" | "CONFLICTING" | "UNKNOWN";
+ /** Branch is behind base branch (out of date) */
+ isBehind: boolean;
+ /** Simplified CI status */
+ ciStatus: "passing" | "failing" | "pending" | "none";
+ /** List of blockers that contradict a "ready to merge" verdict */
+ blockers: string[];
+}
+
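
As an illustration of how these fields contradict a stale "ready to merge" verdict, the blockers list can be derived mechanically from the other flags. The derivation below is a sketch based only on the doc comments above, not the handler's actual implementation:

function computeBlockers(readiness: Omit<MergeReadiness, 'blockers'>): string[] {
  const blockers: string[] = [];
  if (readiness.isDraft) blockers.push('PR is still a draft');
  if (readiness.mergeable === 'CONFLICTING') blockers.push('Merge conflicts with the base branch');
  if (readiness.isBehind) blockers.push('Branch is behind the base branch');
  if (readiness.ciStatus === 'failing') blockers.push('CI checks are failing');
  if (readiness.ciStatus === 'pending') blockers.push('CI checks have not completed');
  return blockers;
}
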
+/**
+ * PR review memory stored in the memory layer
+ * Represents key insights and learnings from a PR review
+ */
+export interface PRReviewMemory {
+ prNumber: number;
+ repo: string;
+ verdict: string;
+ timestamp: string;
+ summary: {
+ verdict: string;
+ verdict_reasoning?: string;
+ finding_counts?: Record;
+ total_findings?: number;
+ blockers?: string[];
+ risk_assessment?: Record;
+ };
+ keyFindings: Array<{
+ severity: string;
+ category: string;
+ title: string;
+ description: string;
+ file: string;
+ line: number;
+ }>;
+ patterns: string[];
+ gotchas: string[];
+ isFollowup: boolean;
+}
+
+/**
+ * Save PR review insights to the Electron memory layer (LadybugDB)
+ *
+ * Called after a PR review completes to persist learnings for cross-session context.
+ * Extracts key findings, patterns, and gotchas from the review result.
+ *
+ * @param result The completed PR review result
+ * @param repo Repository name (owner/repo)
+ * @param isFollowup Whether this is a follow-up review
+ */
+async function savePRReviewToMemory(
+ result: PRReviewResult,
+ repo: string,
+ isFollowup: boolean = false
+): Promise {
+ const settings = readSettingsFile();
+ if (!settings?.memoryEnabled) {
+ debugLog("Memory not enabled, skipping PR review memory save");
+ return;
+ }
+
+ try {
+ const memoryService = getMemoryService({
+ dbPath: getDefaultDbPath(),
+ database: "auto_claude_memory",
+ });
+
+ // Build the memory content with comprehensive insights
+ // We want to capture ALL meaningful findings so the AI can learn from patterns
+
+ // Prioritize findings: critical > high > medium > low
+ // Include all critical/high, top 5 medium, top 3 low
+ const criticalFindings = result.findings.filter((f) => f.severity === "critical");
+ const highFindings = result.findings.filter((f) => f.severity === "high");
+ const mediumFindings = result.findings.filter((f) => f.severity === "medium").slice(0, 5);
+ const lowFindings = result.findings.filter((f) => f.severity === "low").slice(0, 3);
+
+ const keyFindingsToSave = [
+ ...criticalFindings,
+ ...highFindings,
+ ...mediumFindings,
+ ...lowFindings,
+ ].map((f) => ({
+ severity: f.severity,
+ category: f.category,
+ title: f.title,
+ description: f.description.substring(0, 500), // Truncate for storage
+ file: f.file,
+ line: f.line,
+ }));
+
+ // Extract gotchas: security issues, critical bugs, and common mistakes
+ const gotchaCategories = ["security", "error_handling", "data_validation", "race_condition"];
+ const gotchasToSave = result.findings
+ .filter(
+ (f) =>
+ f.severity === "critical" ||
+ f.severity === "high" ||
+ gotchaCategories.includes(f.category?.toLowerCase() || "")
+ )
+ .map((f) => `[${f.category}] ${f.title}: ${f.description.substring(0, 300)}`);
+
+ // Extract patterns: group findings by category to identify recurring issues
+ const categoryGroups = result.findings.reduce(
+ (acc, f) => {
+ const cat = f.category || "general";
+ acc[cat] = (acc[cat] || 0) + 1;
+ return acc;
+ },
+ {} as Record<string, number>
+ );
+
+ // Patterns are categories that appear multiple times (indicates a systematic issue)
+ const patternsToSave = Object.entries(categoryGroups)
+ .filter(([_, count]) => count >= 2)
+ .map(([category, count]) => `${category}: ${count} occurrences`);
+
+ const memoryContent: PRReviewMemory = {
+ prNumber: result.prNumber,
+ repo,
+ verdict: result.overallStatus || "unknown",
+ timestamp: new Date().toISOString(),
+ summary: {
+ verdict: result.overallStatus || "unknown",
+ finding_counts: {
+ critical: criticalFindings.length,
+ high: highFindings.length,
+ medium: result.findings.filter((f) => f.severity === "medium").length,
+ low: result.findings.filter((f) => f.severity === "low").length,
+ },
+ total_findings: result.findings.length,
+ },
+ keyFindings: keyFindingsToSave,
+ patterns: patternsToSave,
+ gotchas: gotchasToSave,
+ isFollowup,
+ };
+
+ // Add follow-up specific info if applicable
+ if (isFollowup && result.resolvedFindings && result.unresolvedFindings) {
+ memoryContent.summary.verdict_reasoning = `Resolved: ${result.resolvedFindings.length}, Unresolved: ${result.unresolvedFindings.length}`;
+ }
+
+ // Save to memory as a pr_review episode
+ const episodeName = `PR #${result.prNumber} ${isFollowup ? "Follow-up " : ""}Review - ${repo}`;
+ const saveResult = await memoryService.addEpisode(
+ episodeName,
+ memoryContent,
+ "pr_review",
+ `pr_review_${repo.replace("/", "_")}`
+ );
+
+ if (saveResult.success) {
+ debugLog("PR review saved to memory", {
+ prNumber: result.prNumber,
+ episodeId: saveResult.id,
+ });
+ } else {
+ debugLog("Failed to save PR review to memory", { error: saveResult.error });
+ }
+ } catch (error) {
+ // Don't fail the review if memory save fails
+ debugLog("Error saving PR review to memory", {
+ error: error instanceof Error ? error.message : error,
+ });
+ }
+}
+
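
To make the pattern extraction above concrete: findings in categories ["security", "security", "style"] reduce to { security: 2, style: 1 }, and only "security: 2 occurrences" survives the count >= 2 filter. The same logic as a standalone helper:

function extractPatterns(categories: string[]): string[] {
  const counts = categories.reduce((acc, cat) => {
    acc[cat] = (acc[cat] || 0) + 1;
    return acc;
  }, {} as Record<string, number>);
  return Object.entries(counts)
    .filter(([, count]) => count >= 2)
    .map(([category, count]) => `${category}: ${count} occurrences`);
}
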
/**
* PR data from GitHub API
*/
@@ -154,7 +363,7 @@ export interface PRData {
* PR review progress status
*/
export interface PRReviewProgress {
- phase: 'fetching' | 'analyzing' | 'generating' | 'posting' | 'complete';
+ phase: "fetching" | "analyzing" | "generating" | "posting" | "complete";
prNumber: number;
progress: number;
message: string;
@@ -164,18 +373,26 @@ export interface PRReviewProgress {
* Get the GitHub directory for a project
*/
function getGitHubDir(project: Project): string {
- return path.join(project.path, '.auto-claude', 'github');
+ return path.join(project.path, ".auto-claude", "github");
}
/**
* PR log phase type
*/
-type PRLogPhase = 'context' | 'analysis' | 'synthesis';
+type PRLogPhase = "context" | "analysis" | "synthesis";
/**
* PR log entry type
*/
-type PRLogEntryType = 'text' | 'tool_start' | 'tool_end' | 'phase_start' | 'phase_end' | 'error' | 'success' | 'info';
+type PRLogEntryType =
+ | "text"
+ | "tool_start"
+ | "tool_end"
+ | "phase_start"
+ | "phase_end"
+ | "error"
+ | "success"
+ | "info";
/**
* Single PR log entry
@@ -195,7 +412,7 @@ interface PRLogEntry {
*/
interface PRPhaseLog {
phase: PRLogPhase;
- status: 'pending' | 'active' | 'completed' | 'failed';
+ status: "pending" | "active" | "completed" | "failed";
started_at: string | null;
completed_at: string | null;
entries: PRLogEntry[];
@@ -249,7 +466,7 @@ function parseLogLine(line: string): { source: string; content: string; isError:
for (const pattern of patterns) {
const match = line.match(pattern);
if (match) {
- const isDebugOrError = pattern.source.includes('DEBUG') || pattern.source.includes('ERROR');
+ const isDebugOrError = pattern.source.includes("DEBUG") || pattern.source.includes("ERROR");
if (isDebugOrError && match.length >= 3) {
// Skip debug messages that only show message types (not useful)
if (match[2].match(/^Message #\d+: \w+Message/)) {
@@ -258,10 +475,10 @@ function parseLogLine(line: string): { source: string; content: string; isError:
return {
source: match[1],
content: match[2],
- isError: pattern.source.includes('ERROR'),
+ isError: pattern.source.includes("ERROR"),
};
}
- const source = line.match(/^\[(\w+(?:\s+\w+)*)\]/)?.[1] || 'Unknown';
+ const source = line.match(/^\[(\w+(?:\s+\w+)*)\]/)?.[1] || "Unknown";
return {
source,
content: match[1] || line,
@@ -274,7 +491,7 @@ function parseLogLine(line: string): { source: string; content: string; isError:
const prProgressMatch = line.match(/^\[PR #\d+\]\s*\[\s*(\d+)%\]\s*(.*)$/);
if (prProgressMatch) {
return {
- source: 'Progress',
+ source: "Progress",
content: `[${prProgressMatch[1]}%] ${prProgressMatch[2]}`,
isError: false,
};
@@ -284,7 +501,7 @@ function parseLogLine(line: string): { source: string; content: string; isError:
const progressMatch = line.match(/^\[(\d+)%\]\s*(.*)$/);
if (progressMatch) {
return {
- source: 'Progress',
+ source: "Progress",
content: `[${progressMatch[1]}%] ${progressMatch[2]}`,
isError: false,
};
@@ -313,7 +530,7 @@ function parseLogLine(line: string): { source: string; content: string; isError:
const match = line.match(pattern);
if (match) {
return {
- source: 'Summary',
+ source: "Summary",
content: line,
isError: false,
};
@@ -327,16 +544,23 @@ function parseLogLine(line: string): { source: string; content: string; isError:
* Determine the phase from source
*/
function getPhaseFromSource(source: string): PRLogPhase {
- const contextSources = ['Context', 'BotDetector'];
- const analysisSources = ['AI', 'Orchestrator', 'ParallelOrchestrator', 'ParallelFollowup', 'Followup', 'orchestrator'];
- const synthesisSources = ['PR Review Engine', 'Summary', 'Progress'];
+ const contextSources = ["Context", "BotDetector"];
+ const analysisSources = [
+ "AI",
+ "Orchestrator",
+ "ParallelOrchestrator",
+ "ParallelFollowup",
+ "Followup",
+ "orchestrator",
+ ];
+ const synthesisSources = ["PR Review Engine", "Summary", "Progress"];
- if (contextSources.includes(source)) return 'context';
- if (analysisSources.includes(source)) return 'analysis';
+ if (contextSources.includes(source)) return "context";
+ if (analysisSources.includes(source)) return "analysis";
// Specialist agents (Agent:xxx) are part of analysis phase
- if (source.startsWith('Agent:')) return 'analysis';
- if (synthesisSources.includes(source)) return 'synthesis';
- return 'synthesis'; // Default to synthesis for unknown sources
+ if (source.startsWith("Agent:")) return "analysis";
+ if (synthesisSources.includes(source)) return "synthesis";
+ return "synthesis"; // Default to synthesis for unknown sources
}
/**
@@ -346,7 +570,7 @@ function createEmptyPRLogs(prNumber: number, repo: string, isFollowup: boolean):
const now = new Date().toISOString();
const createEmptyPhase = (phase: PRLogPhase): PRPhaseLog => ({
phase,
- status: 'pending',
+ status: "pending",
started_at: null,
completed_at: null,
entries: [],
@@ -359,9 +583,9 @@ function createEmptyPRLogs(prNumber: number, repo: string, isFollowup: boolean):
updated_at: now,
is_followup: isFollowup,
phases: {
- context: createEmptyPhase('context'),
- analysis: createEmptyPhase('analysis'),
- synthesis: createEmptyPhase('synthesis'),
+ context: createEmptyPhase("context"),
+ analysis: createEmptyPhase("analysis"),
+ synthesis: createEmptyPhase("synthesis"),
},
};
}
@@ -370,7 +594,7 @@ function createEmptyPRLogs(prNumber: number, repo: string, isFollowup: boolean):
* Get PR logs file path
*/
function getPRLogsPath(project: Project, prNumber: number): string {
- return path.join(getGitHubDir(project), 'pr', `logs_${prNumber}.json`);
+ return path.join(getGitHubDir(project), "pr", `logs_${prNumber}.json`);
}
/**
@@ -380,7 +604,7 @@ function loadPRLogs(project: Project, prNumber: number): PRLogs | null {
const logsPath = getPRLogsPath(project, prNumber);
try {
- const rawData = fs.readFileSync(logsPath, 'utf-8');
+ const rawData = fs.readFileSync(logsPath, "utf-8");
const sanitizedData = sanitizeNetworkData(rawData);
return JSON.parse(sanitizedData) as PRLogs;
} catch {
@@ -400,7 +624,7 @@ function savePRLogs(project: Project, logs: PRLogs): void {
}
logs.updated_at = new Date().toISOString();
- fs.writeFileSync(logsPath, JSON.stringify(logs, null, 2), 'utf-8');
+ fs.writeFileSync(logsPath, JSON.stringify(logs, null, 2), "utf-8");
}
/**
@@ -412,8 +636,8 @@ function addLogEntry(logs: PRLogs, entry: PRLogEntry): boolean {
let statusChanged = false;
// Start the phase if it was pending
- if (phase.status === 'pending') {
- phase.status = 'active';
+ if (phase.status === "pending") {
+ phase.status = "active";
phase.started_at = entry.timestamp;
statusChanged = true;
}
@@ -429,7 +653,7 @@ function addLogEntry(logs: PRLogs, entry: PRLogEntry): boolean {
class PRLogCollector {
private logs: PRLogs;
private project: Project;
- private currentPhase: PRLogPhase = 'context';
+ private currentPhase: PRLogPhase = "context";
private entryCount: number = 0;
private saveInterval: number = 3; // Save every N entries for real-time streaming
@@ -451,14 +675,14 @@ class PRLogCollector {
// When moving to a new phase, mark the previous phase as complete
// Only mark complete if the phase was actually active (received log entries)
// This prevents marking phases as "completed" if they were skipped
- if (this.currentPhase === 'context' && (phase === 'analysis' || phase === 'synthesis')) {
- if (this.logs.phases.context.status === 'active') {
- this.markPhaseComplete('context', true);
+ if (this.currentPhase === "context" && (phase === "analysis" || phase === "synthesis")) {
+ if (this.logs.phases.context.status === "active") {
+ this.markPhaseComplete("context", true);
}
}
- if (this.currentPhase === 'analysis' && phase === 'synthesis') {
- if (this.logs.phases.analysis.status === 'active') {
- this.markPhaseComplete('analysis', true);
+ if (this.currentPhase === "analysis" && phase === "synthesis") {
+ if (this.logs.phases.analysis.status === "active") {
+ this.markPhaseComplete("analysis", true);
}
}
this.currentPhase = phase;
@@ -466,7 +690,7 @@ class PRLogCollector {
const entry: PRLogEntry = {
timestamp: new Date().toISOString(),
- type: parsed.isError ? 'error' : 'text',
+ type: parsed.isError ? "error" : "text",
content: parsed.content,
phase,
source: parsed.source,
@@ -484,7 +708,7 @@ class PRLogCollector {
markPhaseComplete(phase: PRLogPhase, success: boolean): void {
const phaseLog = this.logs.phases[phase];
- phaseLog.status = success ? 'completed' : 'failed';
+ phaseLog.status = success ? "completed" : "failed";
phaseLog.completed_at = new Date().toISOString();
// Save immediately so frontend sees the status change
this.save();
@@ -497,9 +721,9 @@ class PRLogCollector {
finalize(success: boolean): void {
// Mark active phases as completed based on success status
// Pending phases with no entries should stay pending (they never ran)
- for (const phase of ['context', 'analysis', 'synthesis'] as PRLogPhase[]) {
+ for (const phase of ["context", "analysis", "synthesis"] as PRLogPhase[]) {
const phaseLog = this.logs.phases[phase];
- if (phaseLog.status === 'active') {
+ if (phaseLog.status === "active") {
this.markPhaseComplete(phase, success);
}
// Note: Pending phases stay pending - they never received any log entries
@@ -513,35 +737,37 @@ class PRLogCollector {
* Get saved PR review result
*/
function getReviewResult(project: Project, prNumber: number): PRReviewResult | null {
- const reviewPath = path.join(getGitHubDir(project), 'pr', `review_${prNumber}.json`);
+ const reviewPath = path.join(getGitHubDir(project), "pr", `review_${prNumber}.json`);
try {
- const rawData = fs.readFileSync(reviewPath, 'utf-8');
+ const rawData = fs.readFileSync(reviewPath, "utf-8");
const sanitizedData = sanitizeNetworkData(rawData);
const data = JSON.parse(sanitizedData);
return {
prNumber: data.pr_number,
repo: data.repo,
success: data.success,
- findings: data.findings?.map((f: Record<string, unknown>) => ({
- id: f.id,
- severity: f.severity,
- category: f.category,
- title: f.title,
- description: f.description,
- file: f.file,
- line: f.line,
- endLine: f.end_line,
- suggestedFix: f.suggested_fix,
- fixable: f.fixable ?? false,
- })) ?? [],
- summary: data.summary ?? '',
- overallStatus: data.overall_status ?? 'comment',
+ findings:
+ data.findings?.map((f: Record<string, unknown>) => ({
+ id: f.id,
+ severity: f.severity,
+ category: f.category,
+ title: f.title,
+ description: f.description,
+ file: f.file,
+ line: f.line,
+ endLine: f.end_line,
+ suggestedFix: f.suggested_fix,
+ fixable: f.fixable ?? false,
+ })) ?? [],
+ summary: data.summary ?? "",
+ overallStatus: data.overall_status ?? "comment",
reviewId: data.review_id,
reviewedAt: data.reviewed_at ?? new Date().toISOString(),
error: data.error,
// Follow-up review fields (snake_case -> camelCase)
reviewedCommitSha: data.reviewed_commit_sha,
+ reviewedFileBlobs: data.reviewed_file_blobs,
isFollowupReview: data.is_followup_review ?? false,
previousReviewId: data.previous_review_id,
resolvedFindings: data.resolved_findings ?? [],
@@ -575,9 +801,9 @@ function getGitHubPRSettings(): { model: string; thinkingLevel: string } {
const thinkingLevel = featureThinking.githubPrs ?? DEFAULT_FEATURE_THINKING.githubPrs;
// Convert model short name to full model ID
- const model = MODEL_ID_MAP[modelShort] ?? MODEL_ID_MAP['opus'];
+ const model = MODEL_ID_MAP[modelShort] ?? MODEL_ID_MAP["opus"];
- debugLog('GitHub PR settings', { modelShort, model, thinkingLevel });
+ debugLog("GitHub PR settings", { modelShort, model, thinkingLevel });
return { model, thinkingLevel };
}
@@ -615,23 +841,20 @@ async function runPRReview(
const args = buildRunnerArgs(
getRunnerPath(backendPath),
project.path,
- 'review-pr',
+ "review-pr",
[prNumber.toString()],
{ model, thinkingLevel }
);
- debugLog('Spawning PR review process', { args, model, thinkingLevel });
+ debugLog("Spawning PR review process", { args, model, thinkingLevel });
// Create log collector for this review
const config = getGitHubConfig(project);
- const repo = config?.repo || project.name || 'unknown';
+ const repo = config?.repo || project.name || "unknown";
const logCollector = new PRLogCollector(project, prNumber, repo, false);
// Build environment with project settings
- const subprocessEnv: Record = {};
- if (project.settings?.useClaudeMd !== false) {
- subprocessEnv['USE_CLAUDE_MD'] = 'true';
- }
+ const subprocessEnv = await getRunnerEnv(getClaudeMdEnv(project));
const { process: childProcess, promise } = runPythonSubprocess({
pythonPath: getPythonPath(backendPath),
@@ -639,27 +862,27 @@ async function runPRReview(
cwd: backendPath,
env: subprocessEnv,
onProgress: (percent, message) => {
- debugLog('Progress update', { percent, message });
+ debugLog("Progress update", { percent, message });
sendProgress({
- phase: 'analyzing',
+ phase: "analyzing",
prNumber,
progress: percent,
message,
});
},
onStdout: (line) => {
- debugLog('STDOUT:', line);
+ debugLog("STDOUT:", line);
// Collect log entries
logCollector.processLine(line);
},
- onStderr: (line) => debugLog('STDERR:', line),
+ onStderr: (line) => debugLog("STDERR:", line),
onComplete: () => {
// Load the result from disk
const reviewResult = getReviewResult(project, prNumber);
if (!reviewResult) {
- throw new Error('Review completed but result not found');
+ throw new Error("Review completed but result not found");
}
- debugLog('Review result loaded', { findingsCount: reviewResult.findings.length });
+ debugLog("Review result loaded", { findingsCount: reviewResult.findings.length });
return reviewResult;
},
});
@@ -667,7 +890,7 @@ async function runPRReview(
// Register the running process
const reviewKey = getReviewKey(project.id, prNumber);
runningReviews.set(reviewKey, childProcess);
- debugLog('Registered review process', { reviewKey, pid: childProcess.pid });
+ debugLog("Registered review process", { reviewKey, pid: childProcess.pid });
try {
// Wait for the process to complete
@@ -676,44 +899,49 @@ async function runPRReview(
if (!result.success) {
// Finalize logs with failure
logCollector.finalize(false);
- throw new Error(result.error ?? 'Review failed');
+ throw new Error(result.error ?? "Review failed");
}
// Finalize logs with success
logCollector.finalize(true);
+
+ // Save PR review insights to memory (async, non-blocking)
+ savePRReviewToMemory(result.data!, repo, false).catch((err) => {
+ debugLog("Failed to save PR review to memory", { error: err.message });
+ });
+
return result.data!;
} finally {
// Clean up the registry when done (success or error)
runningReviews.delete(reviewKey);
- debugLog('Unregistered review process', { reviewKey });
+ debugLog("Unregistered review process", { reviewKey });
}
}
/**
* Register PR-related handlers
*/
-export function registerPRHandlers(
- getMainWindow: () => BrowserWindow | null
-): void {
- debugLog('Registering PR handlers');
+export function registerPRHandlers(getMainWindow: () => BrowserWindow | null): void {
+ debugLog("Registering PR handlers");
- // List open PRs
+ // List open PRs with pagination support
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_LIST,
- async (_, projectId: string): Promise => {
- debugLog('listPRs handler called', { projectId });
+ async (_, projectId: string, page: number = 1): Promise => {
+ debugLog("listPRs handler called", { projectId, page });
const result = await withProjectOrNull(projectId, async (project) => {
const config = getGitHubConfig(project);
if (!config) {
- debugLog('No GitHub config found for project');
+ debugLog("No GitHub config found for project");
return [];
}
try {
- const prs = await githubFetch(
+ // Use pagination: per_page=100 (GitHub max), page=1,2,3...
+ const prs = (await githubFetch(
config.token,
- `/repos/${config.repo}/pulls?state=open&per_page=50`
- ) as Array<{
+ `/repos/${config.repo}/pulls?state=open&per_page=100&page=${page}`
+ )) as Array<{
number: number;
title: string;
body?: string;
@@ -730,18 +958,18 @@ export function registerPRHandlers(
html_url: string;
}>;
- debugLog('Fetched PRs', { count: prs.length });
- return prs.map(pr => ({
+ debugLog("Fetched PRs", { count: prs.length, page, samplePr: prs[0] });
+ return prs.map((pr) => ({
number: pr.number,
title: pr.title,
- body: pr.body ?? '',
+ body: pr.body ?? "",
state: pr.state,
author: { login: pr.user.login },
headRefName: pr.head.ref,
baseRefName: pr.base.ref,
- additions: pr.additions,
- deletions: pr.deletions,
- changedFiles: pr.changed_files,
+ additions: pr.additions ?? 0,
+ deletions: pr.deletions ?? 0,
+ changedFiles: pr.changed_files ?? 0,
assignees: pr.assignees?.map((a: { login: string }) => ({ login: a.login })) ?? [],
files: [],
createdAt: pr.created_at,
@@ -749,7 +977,9 @@ export function registerPRHandlers(
htmlUrl: pr.html_url,
}));
} catch (error) {
- debugLog('Failed to fetch PRs', { error: error instanceof Error ? error.message : error });
+ debugLog("Failed to fetch PRs", {
+ error: error instanceof Error ? error.message : error,
+ });
return [];
}
});
@@ -761,16 +991,16 @@ export function registerPRHandlers(
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_GET,
async (_, projectId: string, prNumber: number): Promise => {
- debugLog('getPR handler called', { projectId, prNumber });
+ debugLog("getPR handler called", { projectId, prNumber });
return withProjectOrNull(projectId, async (project) => {
const config = getGitHubConfig(project);
if (!config) return null;
try {
- const pr = await githubFetch(
+ const pr = (await githubFetch(
config.token,
`/repos/${config.repo}/pulls/${prNumber}`
- ) as {
+ )) as {
number: number;
title: string;
body?: string;
@@ -787,10 +1017,10 @@ export function registerPRHandlers(
html_url: string;
};
- const files = await githubFetch(
+ const files = (await githubFetch(
config.token,
`/repos/${config.repo}/pulls/${prNumber}/files`
- ) as Array<{
+ )) as Array<{
filename: string;
additions: number;
deletions: number;
@@ -800,19 +1030,19 @@ export function registerPRHandlers(
return {
number: pr.number,
title: pr.title,
- body: pr.body ?? '',
+ body: pr.body ?? "",
state: pr.state,
author: { login: pr.user.login },
headRefName: pr.head.ref,
baseRefName: pr.base.ref,
- additions: pr.additions,
- deletions: pr.deletions,
- changedFiles: pr.changed_files,
+ additions: pr.additions ?? 0,
+ deletions: pr.deletions ?? 0,
+ changedFiles: pr.changed_files ?? 0,
assignees: pr.assignees?.map((a: { login: string }) => ({ login: a.login })) ?? [],
- files: files.map(f => ({
+ files: files.map((f) => ({
path: f.filename,
- additions: f.additions,
- deletions: f.deletions,
+ additions: f.additions ?? 0,
+ deletions: f.deletions ?? 0,
status: f.status,
})),
createdAt: pr.created_at,
@@ -835,15 +1065,15 @@ export function registerPRHandlers(
if (!config) return null;
try {
- const { execFileSync } = await import('child_process');
+ const { execFileSync } = await import("child_process");
// Validate prNumber to prevent command injection
if (!Number.isInteger(prNumber) || prNumber <= 0) {
- throw new Error('Invalid PR number');
+ throw new Error("Invalid PR number");
}
// Use execFileSync with arguments array to prevent command injection
- const diff = execFileSync('gh', ['pr', 'diff', String(prNumber)], {
+ const diff = execFileSync("gh", ["pr", "diff", String(prNumber)], {
cwd: project.path,
- encoding: 'utf-8',
+ encoding: "utf-8",
env: getAugmentedEnv(),
});
return diff;
@@ -864,6 +1094,29 @@ export function registerPRHandlers(
}
);
+ // Batch get saved reviews - more efficient than individual calls
+ ipcMain.handle(
+ IPC_CHANNELS.GITHUB_PR_GET_REVIEWS_BATCH,
+ async (
+ _,
+ projectId: string,
+ prNumbers: number[]
+ ): Promise<Record<number, PRReviewResult | null>> => {
+ debugLog("getReviewsBatch handler called", { projectId, count: prNumbers.length });
+ const result = await withProjectOrNull(projectId, async (project) => {
+ const reviews: Record<number, PRReviewResult | null> = {};
+ for (const prNumber of prNumbers) {
+ reviews[prNumber] = getReviewResult(project, prNumber);
+ }
+ debugLog("Batch loaded reviews", {
+ count: Object.values(reviews).filter((r) => r !== null).length,
+ });
+ return reviews;
+ });
+ return result ?? {};
+ }
+ );
+
// Get PR review logs
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_GET_LOGS,
@@ -875,82 +1128,20 @@ export function registerPRHandlers(
);
// Run AI review
- ipcMain.on(
- IPC_CHANNELS.GITHUB_PR_REVIEW,
- async (_, projectId: string, prNumber: number) => {
- debugLog('runPRReview handler called', { projectId, prNumber });
- const mainWindow = getMainWindow();
- if (!mainWindow) {
- debugLog('No main window available');
- return;
- }
-
- try {
- await withProjectOrNull(projectId, async (project) => {
- const { sendProgress, sendComplete } = createIPCCommunicators(
- mainWindow,
- {
- progress: IPC_CHANNELS.GITHUB_PR_REVIEW_PROGRESS,
- error: IPC_CHANNELS.GITHUB_PR_REVIEW_ERROR,
- complete: IPC_CHANNELS.GITHUB_PR_REVIEW_COMPLETE,
- },
- projectId
- );
-
- debugLog('Starting PR review', { prNumber });
- sendProgress({
- phase: 'fetching',
- prNumber,
- progress: 5,
- message: 'Assigning you to PR...',
- });
-
- // Auto-assign current user to PR
- const config = getGitHubConfig(project);
- if (config) {
- try {
- // Get current user
- const user = await githubFetch(config.token, '/user') as { login: string };
- debugLog('Auto-assigning user to PR', { prNumber, username: user.login });
-
- // Assign to PR
- await githubFetch(
- config.token,
- `/repos/${config.repo}/issues/${prNumber}/assignees`,
- {
- method: 'POST',
- body: JSON.stringify({ assignees: [user.login] }),
- }
- );
- debugLog('User assigned successfully', { prNumber, username: user.login });
- } catch (assignError) {
- // Don't fail the review if assignment fails, just log it
- debugLog('Failed to auto-assign user', { prNumber, error: assignError instanceof Error ? assignError.message : assignError });
- }
- }
-
- sendProgress({
- phase: 'fetching',
- prNumber,
- progress: 10,
- message: 'Fetching PR data...',
- });
-
- const result = await runPRReview(project, prNumber, mainWindow);
-
- debugLog('PR review completed', { prNumber, findingsCount: result.findings.length });
- sendProgress({
- phase: 'complete',
- prNumber,
- progress: 100,
- message: 'Review complete!',
- });
+ ipcMain.on(IPC_CHANNELS.GITHUB_PR_REVIEW, async (_, projectId: string, prNumber: number) => {
+ debugLog("runPRReview handler called", { projectId, prNumber });
+ const mainWindow = getMainWindow();
+ if (!mainWindow) {
+ debugLog("No main window available");
+ return;
+ }
- sendComplete(result);
- });
- } catch (error) {
- debugLog('PR review failed', { prNumber, error: error instanceof Error ? error.message : error });
- const { sendError } = createIPCCommunicators(
+ try {
+ await withProjectOrNull(projectId, async (project) => {
+ const { sendProgress, sendComplete } = createIPCCommunicators<
+ PRReviewProgress,
+ PRReviewResult
+ >(
mainWindow,
{
progress: IPC_CHANNELS.GITHUB_PR_REVIEW_PROGRESS,
@@ -959,26 +1150,101 @@ export function registerPRHandlers(
},
projectId
);
- sendError(error instanceof Error ? error.message : 'Failed to run PR review');
- }
+
+ debugLog("Starting PR review", { prNumber });
+ sendProgress({
+ phase: "fetching",
+ prNumber,
+ progress: 5,
+ message: "Assigning you to PR...",
+ });
+
+ // Auto-assign current user to PR
+ const config = getGitHubConfig(project);
+ if (config) {
+ try {
+ // Get current user
+ const user = (await githubFetch(config.token, "/user")) as { login: string };
+ debugLog("Auto-assigning user to PR", { prNumber, username: user.login });
+
+ // Assign to PR
+ await githubFetch(config.token, `/repos/${config.repo}/issues/${prNumber}/assignees`, {
+ method: "POST",
+ body: JSON.stringify({ assignees: [user.login] }),
+ });
+ debugLog("User assigned successfully", { prNumber, username: user.login });
+ } catch (assignError) {
+ // Don't fail the review if assignment fails, just log it
+ debugLog("Failed to auto-assign user", {
+ prNumber,
+ error: assignError instanceof Error ? assignError.message : assignError,
+ });
+ }
+ }
+
+ sendProgress({
+ phase: "fetching",
+ prNumber,
+ progress: 10,
+ message: "Fetching PR data...",
+ });
+
+ const result = await runPRReview(project, prNumber, mainWindow);
+
+ debugLog("PR review completed", { prNumber, findingsCount: result.findings.length });
+ sendProgress({
+ phase: "complete",
+ prNumber,
+ progress: 100,
+ message: "Review complete!",
+ });
+
+ sendComplete(result);
+ });
+ } catch (error) {
+ debugLog("PR review failed", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
+ const { sendError } = createIPCCommunicators(
+ mainWindow,
+ {
+ progress: IPC_CHANNELS.GITHUB_PR_REVIEW_PROGRESS,
+ error: IPC_CHANNELS.GITHUB_PR_REVIEW_ERROR,
+ complete: IPC_CHANNELS.GITHUB_PR_REVIEW_COMPLETE,
+ },
+ projectId
+ );
+ sendError(error instanceof Error ? error.message : "Failed to run PR review");
}
- );
+ });
// Post review to GitHub
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_POST_REVIEW,
- async (_, projectId: string, prNumber: number, selectedFindingIds?: string[]): Promise<boolean> => {
- debugLog('postPRReview handler called', { projectId, prNumber, selectedCount: selectedFindingIds?.length });
+ async (
+ _,
+ projectId: string,
+ prNumber: number,
+ selectedFindingIds?: string[],
+ options?: { forceApprove?: boolean }
+ ): Promise<boolean> => {
+ debugLog("postPRReview handler called", {
+ projectId,
+ prNumber,
+ selectedCount: selectedFindingIds?.length,
+ forceApprove: options?.forceApprove,
+ });
const postResult = await withProjectOrNull(projectId, async (project) => {
const result = getReviewResult(project, prNumber);
if (!result) {
- debugLog('No review result found', { prNumber });
+ debugLog("No review result found", { prNumber });
return false;
}
const config = getGitHubConfig(project);
if (!config) {
- debugLog('No GitHub config found');
+ debugLog("No GitHub config found");
return false;
}
@@ -986,115 +1252,179 @@ export function registerPRHandlers(
// Filter findings if selection provided
const selectedSet = selectedFindingIds ? new Set(selectedFindingIds) : null;
const findings = selectedSet
- ? result.findings.filter(f => selectedSet.has(f.id))
+ ? result.findings.filter((f) => selectedSet.has(f.id))
: result.findings;
- debugLog('Posting findings', { total: result.findings.length, selected: findings.length });
-
- // Build review body
- let body = `## 🤖 Auto Claude PR Review\n\n${result.summary}\n\n`;
-
- if (findings.length > 0) {
- // Show selected count vs total if filtered
- const countText = selectedSet
- ? `${findings.length} selected of ${result.findings.length} total`
- : `${findings.length} total`;
- body += `### Findings (${countText})\n\n`;
-
- for (const f of findings) {
- const emoji = { critical: '🔴', high: '🟠', medium: '🟡', low: '🔵' }[f.severity] || '⚪';
- body += `#### ${emoji} [${f.severity.toUpperCase()}] ${f.title}\n`;
- body += `📁 \`${f.file}:${f.line}\`\n\n`;
- body += `${f.description}\n\n`;
- // Only show suggested fix if it has actual content
- const suggestedFix = f.suggestedFix?.trim();
- if (suggestedFix) {
- body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`;
+ debugLog("Posting findings", {
+ total: result.findings.length,
+ selected: findings.length,
+ });
+
+ // Build review body - different format for auto-approve with suggestions
+ let body: string;
+
+ if (options?.forceApprove) {
+ // Auto-approve format: clean approval message with optional suggestions
+ body = `## ✅ Auto Claude Review - APPROVED\n\n`;
+ body += `**Status:** Ready to Merge\n\n`;
+ body += `**Summary:** ${result.summary}\n\n`;
+
+ if (findings.length > 0) {
+ body += `---\n\n`;
+ body += `### 💡 Suggestions (${findings.length})\n\n`;
+ body += `*These are non-blocking suggestions for consideration:*\n\n`;
+
+ for (const f of findings) {
+ const emoji =
+ { critical: "🔴", high: "🟠", medium: "🟡", low: "🔵" }[f.severity] || "⚪";
+ body += `#### ${emoji} [${f.id}] [${f.severity.toUpperCase()}] ${f.title}\n`;
+ body += `📁 \`${f.file}:${f.line}\`\n\n`;
+ body += `${f.description}\n\n`;
+ const suggestedFix = f.suggestedFix?.trim();
+ if (suggestedFix) {
+ body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`;
+ }
}
}
+
+ body += `---\n*This automated review found no blocking issues. The PR can be safely merged.*\n\n`;
+ body += `*Generated by Auto Claude*`;
} else {
- body += `*No findings selected for this review.*\n\n`;
- }
+ // Standard review format
+ body = `## 🤖 Auto Claude PR Review\n\n${result.summary}\n\n`;
+
+ if (findings.length > 0) {
+ // Show selected count vs total if filtered
+ const countText = selectedSet
+ ? `${findings.length} selected of ${result.findings.length} total`
+ : `${findings.length} total`;
+ body += `### Findings (${countText})\n\n`;
+
+ for (const f of findings) {
+ const emoji =
+ { critical: "🔴", high: "🟠", medium: "🟡", low: "🔵" }[f.severity] || "⚪";
+ body += `#### ${emoji} [${f.id}] [${f.severity.toUpperCase()}] ${f.title}\n`;
+ body += `📁 \`${f.file}:${f.line}\`\n\n`;
+ body += `${f.description}\n\n`;
+ // Only show suggested fix if it has actual content
+ const suggestedFix = f.suggestedFix?.trim();
+ if (suggestedFix) {
+ body += `**Suggested fix:**\n\`\`\`\n${suggestedFix}\n\`\`\`\n\n`;
+ }
+ }
+ } else {
+ body += `*No findings selected for this review.*\n\n`;
+ }
- body += `---\n*This review was generated by Auto Claude.*`;
+ body += `---\n*This review was generated by Auto Claude.*`;
+ }
- // Determine review status based on selected findings
+ // Determine review status based on selected findings (or force approve)
let overallStatus = result.overallStatus;
- if (selectedSet) {
- const hasBlocker = findings.some(f => f.severity === 'critical' || f.severity === 'high');
- overallStatus = hasBlocker ? 'request_changes' : (findings.length > 0 ? 'comment' : 'approve');
+ if (options?.forceApprove) {
+ // Force approve regardless of findings
+ overallStatus = "approve";
+ } else if (selectedSet) {
+ const hasBlocker = findings.some(
+ (f) => f.severity === "critical" || f.severity === "high"
+ );
+ overallStatus = hasBlocker
+ ? "request_changes"
+ : findings.length > 0
+ ? "comment"
+ : "approve";
}
// Map to GitHub API event type
- const event = overallStatus === 'approve' ? 'APPROVE' :
- overallStatus === 'request_changes' ? 'REQUEST_CHANGES' : 'COMMENT';
-
- debugLog('Posting review to GitHub', { prNumber, status: overallStatus, event, findingsCount: findings.length });
+ const event =
+ overallStatus === "approve"
+ ? "APPROVE"
+ : overallStatus === "request_changes"
+ ? "REQUEST_CHANGES"
+ : "COMMENT";
+
+ debugLog("Posting review to GitHub", {
+ prNumber,
+ status: overallStatus,
+ event,
+ findingsCount: findings.length,
+ });
// Post review via GitHub API to capture review ID
let reviewId: number;
try {
- const reviewResponse = await githubFetch(
+ const reviewResponse = (await githubFetch(
config.token,
`/repos/${config.repo}/pulls/${prNumber}/reviews`,
{
- method: 'POST',
+ method: "POST",
body: JSON.stringify({
body,
event,
}),
}
- ) as { id: number };
+ )) as { id: number };
reviewId = reviewResponse.id;
} catch (error) {
// GitHub doesn't allow REQUEST_CHANGES or APPROVE on your own PR
// Fall back to COMMENT if that's the error
const errorMsg = error instanceof Error ? error.message : String(error);
- if (errorMsg.includes('Can not request changes on your own pull request') ||
- errorMsg.includes('Can not approve your own pull request')) {
- debugLog('Cannot use REQUEST_CHANGES/APPROVE on own PR, falling back to COMMENT', { prNumber });
- const fallbackResponse = await githubFetch(
+ if (
+ errorMsg.includes("Can not request changes on your own pull request") ||
+ errorMsg.includes("Can not approve your own pull request")
+ ) {
+ debugLog("Cannot use REQUEST_CHANGES/APPROVE on own PR, falling back to COMMENT", {
+ prNumber,
+ });
+ const fallbackResponse = (await githubFetch(
config.token,
`/repos/${config.repo}/pulls/${prNumber}/reviews`,
{
- method: 'POST',
+ method: "POST",
body: JSON.stringify({
body,
- event: 'COMMENT',
+ event: "COMMENT",
}),
}
- ) as { id: number };
+ )) as { id: number };
reviewId = fallbackResponse.id;
} else {
throw error;
}
}
- debugLog('Review posted successfully', { prNumber, reviewId });
+ debugLog("Review posted successfully", { prNumber, reviewId });
// Update the stored review result with the review ID and posted findings
- const reviewPath = path.join(getGitHubDir(project), 'pr', `review_${prNumber}.json`);
+ const reviewPath = path.join(getGitHubDir(project), "pr", `review_${prNumber}.json`);
try {
- const rawData = fs.readFileSync(reviewPath, 'utf-8');
+ const rawData = fs.readFileSync(reviewPath, "utf-8");
// Sanitize network data before parsing (review may contain data from GitHub API)
const sanitizedData = sanitizeNetworkData(rawData);
const data = JSON.parse(sanitizedData);
data.review_id = reviewId;
// Track posted findings to enable follow-up review
data.has_posted_findings = true;
- const newPostedIds = findings.map(f => f.id);
+ const newPostedIds = findings.map((f) => f.id);
const existingPostedIds = data.posted_finding_ids || [];
data.posted_finding_ids = [...new Set([...existingPostedIds, ...newPostedIds])];
data.posted_at = new Date().toISOString();
- fs.writeFileSync(reviewPath, JSON.stringify(data, null, 2), 'utf-8');
- debugLog('Updated review result with review ID and posted findings', { prNumber, reviewId, postedCount: newPostedIds.length });
+ fs.writeFileSync(reviewPath, JSON.stringify(data, null, 2), "utf-8");
+ debugLog("Updated review result with review ID and posted findings", {
+ prNumber,
+ reviewId,
+ postedCount: newPostedIds.length,
+ });
} catch {
// File doesn't exist or couldn't be read - this is expected for new reviews
- debugLog('Review result file not found or unreadable, skipping update', { prNumber });
+ debugLog("Review result file not found or unreadable, skipping update", { prNumber });
}
return true;
} catch (error) {
- debugLog('Failed to post review', { prNumber, error: error instanceof Error ? error.message : error });
+ debugLog("Failed to post review", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
return false;
}
});
@@ -1106,41 +1436,46 @@ export function registerPRHandlers(
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_POST_COMMENT,
async (_, projectId: string, prNumber: number, body: string): Promise<boolean> => {
- debugLog('postPRComment handler called', { projectId, prNumber });
+ debugLog("postPRComment handler called", { projectId, prNumber });
const postResult = await withProjectOrNull(projectId, async (project) => {
try {
- const { execFileSync } = await import('child_process');
- const { writeFileSync, unlinkSync } = await import('fs');
- const { join } = await import('path');
+ const { execFileSync } = await import("child_process");
+ const { writeFileSync, unlinkSync } = await import("fs");
+ const { join } = await import("path");
- debugLog('Posting comment to PR', { prNumber });
+ debugLog("Posting comment to PR", { prNumber });
// Validate prNumber to prevent command injection
if (!Number.isInteger(prNumber) || prNumber <= 0) {
- throw new Error('Invalid PR number');
+ throw new Error("Invalid PR number");
}
// Use temp file to avoid shell escaping issues
- const tmpFile = join(project.path, '.auto-claude', 'tmp_comment_body.txt');
+ const tmpFile = join(project.path, ".auto-claude", "tmp_comment_body.txt");
try {
- writeFileSync(tmpFile, body, 'utf-8');
+ writeFileSync(tmpFile, body, "utf-8");
// Use execFileSync with arguments array to prevent command injection
- execFileSync('gh', ['pr', 'comment', String(prNumber), '--body-file', tmpFile], {
+ execFileSync("gh", ["pr", "comment", String(prNumber), "--body-file", tmpFile], {
cwd: project.path,
env: getAugmentedEnv(),
});
unlinkSync(tmpFile);
} catch (error) {
- try { unlinkSync(tmpFile); } catch {
+ try {
+ unlinkSync(tmpFile);
+ } catch {
// Ignore cleanup errors
}
throw error;
}
- debugLog('Comment posted successfully', { prNumber });
+ debugLog("Comment posted successfully", { prNumber });
return true;
} catch (error) {
- debugLog('Failed to post comment', { prNumber, error: error instanceof Error ? error.message : error });
+ debugLog("Failed to post comment", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
return false;
}
});
@@ -1152,51 +1487,54 @@ export function registerPRHandlers(
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_DELETE_REVIEW,
async (_, projectId: string, prNumber: number): Promise<boolean> => {
- debugLog('deletePRReview handler called', { projectId, prNumber });
+ debugLog("deletePRReview handler called", { projectId, prNumber });
const deleteResult = await withProjectOrNull(projectId, async (project) => {
const result = getReviewResult(project, prNumber);
if (!result || !result.reviewId) {
- debugLog('No review ID found for deletion', { prNumber });
+ debugLog("No review ID found for deletion", { prNumber });
return false;
}
const config = getGitHubConfig(project);
if (!config) {
- debugLog('No GitHub config found');
+ debugLog("No GitHub config found");
return false;
}
try {
- debugLog('Deleting review from GitHub', { prNumber, reviewId: result.reviewId });
+ debugLog("Deleting review from GitHub", { prNumber, reviewId: result.reviewId });
// Delete review via GitHub API
await githubFetch(
config.token,
`/repos/${config.repo}/pulls/${prNumber}/reviews/${result.reviewId}`,
{
- method: 'DELETE',
+ method: "DELETE",
}
);
- debugLog('Review deleted successfully', { prNumber, reviewId: result.reviewId });
+ debugLog("Review deleted successfully", { prNumber, reviewId: result.reviewId });
// Clear the review ID from the stored result
- const reviewPath = path.join(getGitHubDir(project), 'pr', `review_${prNumber}.json`);
+ const reviewPath = path.join(getGitHubDir(project), "pr", `review_${prNumber}.json`);
try {
- const rawData = fs.readFileSync(reviewPath, 'utf-8');
+ const rawData = fs.readFileSync(reviewPath, "utf-8");
const sanitizedData = sanitizeNetworkData(rawData);
const data = JSON.parse(sanitizedData);
delete data.review_id;
- fs.writeFileSync(reviewPath, JSON.stringify(data, null, 2), 'utf-8');
- debugLog('Cleared review ID from result file', { prNumber });
+ fs.writeFileSync(reviewPath, JSON.stringify(data, null, 2), "utf-8");
+ debugLog("Cleared review ID from result file", { prNumber });
} catch {
// File doesn't exist or couldn't be read - this is expected if review wasn't saved
- debugLog('Review result file not found or unreadable, skipping update', { prNumber });
+ debugLog("Review result file not found or unreadable, skipping update", { prNumber });
}
return true;
} catch (error) {
- debugLog('Failed to delete review', { prNumber, error: error instanceof Error ? error.message : error });
+ debugLog("Failed to delete review", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
return false;
}
});
@@ -1207,33 +1545,41 @@ export function registerPRHandlers(
// Merge PR
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_MERGE,
- async (_, projectId: string, prNumber: number, mergeMethod: 'merge' | 'squash' | 'rebase' = 'squash'): Promise<boolean> => {
- debugLog('mergePR handler called', { projectId, prNumber, mergeMethod });
+ async (
+ _,
+ projectId: string,
+ prNumber: number,
+ mergeMethod: "merge" | "squash" | "rebase" = "squash"
+ ): Promise<boolean> => {
+ debugLog("mergePR handler called", { projectId, prNumber, mergeMethod });
const mergeResult = await withProjectOrNull(projectId, async (project) => {
try {
- const { execFileSync } = await import('child_process');
- debugLog('Merging PR', { prNumber, method: mergeMethod });
+ const { execFileSync } = await import("child_process");
+ debugLog("Merging PR", { prNumber, method: mergeMethod });
// Validate prNumber to prevent command injection
if (!Number.isInteger(prNumber) || prNumber <= 0) {
- throw new Error('Invalid PR number');
+ throw new Error("Invalid PR number");
}
// Validate mergeMethod to prevent command injection
- const validMethods = ['merge', 'squash', 'rebase'];
+ const validMethods = ["merge", "squash", "rebase"];
if (!validMethods.includes(mergeMethod)) {
- throw new Error('Invalid merge method');
+ throw new Error("Invalid merge method");
}
// Use execFileSync with arguments array to prevent command injection
- execFileSync('gh', ['pr', 'merge', String(prNumber), `--${mergeMethod}`], {
+ execFileSync("gh", ["pr", "merge", String(prNumber), `--${mergeMethod}`], {
cwd: project.path,
env: getAugmentedEnv(),
});
- debugLog('PR merged successfully', { prNumber });
+ debugLog("PR merged successfully", { prNumber });
return true;
} catch (error) {
- debugLog('Failed to merge PR', { prNumber, error: error instanceof Error ? error.message : error });
+ debugLog("Failed to merge PR", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
return false;
}
});
@@ -1245,25 +1591,25 @@ export function registerPRHandlers(
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_ASSIGN,
async (_, projectId: string, prNumber: number, username: string): Promise<boolean> => {
- debugLog('assignPR handler called', { projectId, prNumber, username });
+ debugLog("assignPR handler called", { projectId, prNumber, username });
const assignResult = await withProjectOrNull(projectId, async (project) => {
const config = getGitHubConfig(project);
if (!config) return false;
try {
// Use GitHub API to add assignee
- await githubFetch(
- config.token,
- `/repos/${config.repo}/issues/${prNumber}/assignees`,
- {
- method: 'POST',
- body: JSON.stringify({ assignees: [username] }),
- }
- );
- debugLog('User assigned successfully', { prNumber, username });
+ await githubFetch(config.token, `/repos/${config.repo}/issues/${prNumber}/assignees`, {
+ method: "POST",
+ body: JSON.stringify({ assignees: [username] }),
+ });
+ debugLog("User assigned successfully", { prNumber, username });
return true;
} catch (error) {
- debugLog('Failed to assign user', { prNumber, username, error: error instanceof Error ? error.message : error });
+ debugLog("Failed to assign user", {
+ prNumber,
+ username,
+ error: error instanceof Error ? error.message : error,
+ });
return false;
}
});
@@ -1275,33 +1621,36 @@ export function registerPRHandlers(
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_REVIEW_CANCEL,
async (_, projectId: string, prNumber: number): Promise<boolean> => {
- debugLog('cancelPRReview handler called', { projectId, prNumber });
+ debugLog("cancelPRReview handler called", { projectId, prNumber });
const reviewKey = getReviewKey(projectId, prNumber);
const childProcess = runningReviews.get(reviewKey);
if (!childProcess) {
- debugLog('No running review found to cancel', { reviewKey });
+ debugLog("No running review found to cancel", { reviewKey });
return false;
}
try {
- debugLog('Killing review process', { reviewKey, pid: childProcess.pid });
- childProcess.kill('SIGTERM');
+ debugLog("Killing review process", { reviewKey, pid: childProcess.pid });
+ childProcess.kill("SIGTERM");
// Give it a moment to terminate gracefully, then force kill if needed
setTimeout(() => {
if (!childProcess.killed) {
- debugLog('Force killing review process', { reviewKey, pid: childProcess.pid });
- childProcess.kill('SIGKILL');
+ debugLog("Force killing review process", { reviewKey, pid: childProcess.pid });
+ childProcess.kill("SIGKILL");
}
}, 1000);
// Clean up the registry
runningReviews.delete(reviewKey);
- debugLog('Review process cancelled', { reviewKey });
+ debugLog("Review process cancelled", { reviewKey });
return true;
} catch (error) {
- debugLog('Failed to cancel review', { reviewKey, error: error instanceof Error ? error.message : error });
+ debugLog("Failed to cancel review", {
+ reviewKey,
+ error: error instanceof Error ? error.message : error,
+ });
return false;
}
}
@@ -1311,16 +1660,16 @@ export function registerPRHandlers(
ipcMain.handle(
IPC_CHANNELS.GITHUB_PR_CHECK_NEW_COMMITS,
async (_, projectId: string, prNumber: number): Promise => {
- debugLog('checkNewCommits handler called', { projectId, prNumber });
+ debugLog("checkNewCommits handler called", { projectId, prNumber });
const result = await withProjectOrNull(projectId, async (project) => {
// Check if review exists and has reviewed_commit_sha
- const githubDir = path.join(project.path, '.auto-claude', 'github');
- const reviewPath = path.join(githubDir, 'pr', `review_${prNumber}.json`);
+ const githubDir = path.join(project.path, ".auto-claude", "github");
+ const reviewPath = path.join(githubDir, "pr", `review_${prNumber}.json`);
let review: PRReviewResult;
try {
- const rawData = fs.readFileSync(reviewPath, 'utf-8');
+ const rawData = fs.readFileSync(reviewPath, "utf-8");
const sanitizedData = sanitizeNetworkData(rawData);
review = JSON.parse(sanitizedData);
} catch {
@@ -1328,10 +1677,10 @@ export function registerPRHandlers(
return { hasNewCommits: false, newCommitCount: 0 };
}
- // Convert snake_case to camelCase for the field
- const reviewedCommitSha = review.reviewedCommitSha || (review as any).reviewed_commit_sha;
+ // Normalize snake_case to camelCase for backwards compatibility with old saved files
+ const reviewedCommitSha = review.reviewedCommitSha ?? (review as any).reviewed_commit_sha;
if (!reviewedCommitSha) {
- debugLog('No reviewedCommitSha in review', { prNumber });
+ debugLog("No reviewedCommitSha in review", { prNumber });
return { hasNewCommits: false, newCommitCount: 0 };
}
@@ -1350,7 +1699,10 @@ export function registerPRHandlers(
)) as { head: { sha: string }; commits: number };
currentHeadSha = prData.head.sha;
} catch (error) {
- debugLog('Error fetching PR data', { prNumber, error: error instanceof Error ? error.message : error });
+ debugLog("Error fetching PR data", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
return { hasNewCommits: false, newCommitCount: 0 };
}
@@ -1371,7 +1723,11 @@ export function registerPRHandlers(
const comparison = (await githubFetch(
config.token,
`/repos/${config.repo}/compare/${reviewedCommitSha}...${currentHeadSha}`
- )) as { ahead_by?: number; total_commits?: number; commits?: Array<{ commit: { committer: { date: string } } }> };
+ )) as {
+ ahead_by?: number;
+ total_commits?: number;
+ commits?: Array<{ commit: { committer: { date: string } } }>;
+ };
// Check if findings have been posted and if new commits are after the posting date
const postedAt = review.postedAt || (review as any).posted_at;
@@ -1380,14 +1736,15 @@ export function registerPRHandlers(
if (postedAt && comparison.commits && comparison.commits.length > 0) {
const postedAtDate = new Date(postedAt);
// Check if any commit is newer than when findings were posted
- hasCommitsAfterPosting = comparison.commits.some(c => {
+ hasCommitsAfterPosting = comparison.commits.some((c) => {
const commitDate = new Date(c.commit.committer.date);
return commitDate > postedAtDate;
});
- debugLog('Comparing commit dates with posted_at', {
+ debugLog("Comparing commit dates with posted_at", {
prNumber,
postedAt,
- latestCommitDate: comparison.commits[comparison.commits.length - 1]?.commit.committer.date,
+ latestCommitDate:
+ comparison.commits[comparison.commits.length - 1]?.commit.committer.date,
hasCommitsAfterPosting,
});
} else if (!postedAt) {
@@ -1405,12 +1762,15 @@ export function registerPRHandlers(
} catch (error) {
// Comparison failed (e.g., force push made old commit unreachable)
// Since we already verified SHAs differ, treat as having new commits
- debugLog('Comparison failed but SHAs differ - likely force push, treating as new commits', {
- prNumber,
- reviewedCommitSha,
- currentHeadSha,
- error: error instanceof Error ? error.message : error
- });
+ debugLog(
+ "Comparison failed but SHAs differ - likely force push, treating as new commits",
+ {
+ prNumber,
+ reviewedCommitSha,
+ currentHeadSha,
+ error: error instanceof Error ? error.message : error,
+ }
+ );
return {
hasNewCommits: true,
newCommitCount: 1, // Unknown count due to force push
@@ -1425,20 +1785,165 @@ export function registerPRHandlers(
}
);
+ // Check merge readiness (lightweight freshness check for verdict validation)
+ ipcMain.handle(
+ IPC_CHANNELS.GITHUB_PR_CHECK_MERGE_READINESS,
+ async (_, projectId: string, prNumber: number): Promise<MergeReadiness> => {
+ debugLog("checkMergeReadiness handler called", { projectId, prNumber });
+
+ const defaultResult: MergeReadiness = {
+ isDraft: false,
+ mergeable: "UNKNOWN",
+ isBehind: false,
+ ciStatus: "none",
+ blockers: [],
+ };
+
+ const result = await withProjectOrNull(projectId, async (project) => {
+ const config = getGitHubConfig(project);
+ if (!config) {
+ debugLog("No GitHub config found for checkMergeReadiness");
+ return defaultResult;
+ }
+
+ try {
+ // Fetch PR data including mergeable status
+ const pr = (await githubFetch(
+ config.token,
+ `/repos/${config.repo}/pulls/${prNumber}`
+ )) as {
+ draft: boolean;
+ mergeable: boolean | null;
+ mergeable_state: string;
+ head: { sha: string };
+ };
+
+ // Determine mergeable status
+ let mergeable: MergeReadiness["mergeable"] = "UNKNOWN";
+ if (pr.mergeable === true) {
+ mergeable = "MERGEABLE";
+ } else if (pr.mergeable === false || pr.mergeable_state === "dirty") {
+ mergeable = "CONFLICTING";
+ }
+
+ // Check if branch is behind base (out of date)
+ // GitHub's mergeable_state can be: 'behind', 'blocked', 'clean', 'dirty', 'has_hooks', 'unknown', 'unstable'
+ const isBehind = pr.mergeable_state === "behind";
+
+ // Fetch combined commit status for CI
+ let ciStatus: MergeReadiness["ciStatus"] = "none";
+ try {
+ const status = (await githubFetch(
+ config.token,
+ `/repos/${config.repo}/commits/${pr.head.sha}/status`
+ )) as {
+ state: "success" | "pending" | "failure" | "error";
+ total_count: number;
+ };
+
+ if (status.total_count === 0) {
+ // No status checks, check for check runs (GitHub Actions)
+ const checkRuns = (await githubFetch(
+ config.token,
+ `/repos/${config.repo}/commits/${pr.head.sha}/check-runs`
+ )) as {
+ total_count: number;
+ check_runs: Array<{ conclusion: string | null; status: string }>;
+ };
+
+ if (checkRuns.total_count > 0) {
+ const hasFailing = checkRuns.check_runs.some(
+ (cr) => cr.conclusion === "failure" || cr.conclusion === "cancelled"
+ );
+ const hasPending = checkRuns.check_runs.some((cr) => cr.status !== "completed");
+
+ if (hasFailing) {
+ ciStatus = "failing";
+ } else if (hasPending) {
+ ciStatus = "pending";
+ } else {
+ ciStatus = "passing";
+ }
+ }
+ } else {
+ // Use combined status
+ if (status.state === "success") {
+ ciStatus = "passing";
+ } else if (status.state === "pending") {
+ ciStatus = "pending";
+ } else {
+ ciStatus = "failing";
+ }
+ }
+ } catch (err) {
+ debugLog("Failed to fetch CI status", {
+ prNumber,
+ error: err instanceof Error ? err.message : err,
+ });
+ // Continue without CI status
+ }
+
+ // Build blockers list
+ const blockers: string[] = [];
+ if (pr.draft) {
+ blockers.push("PR is in draft mode");
+ }
+ if (mergeable === "CONFLICTING") {
+ blockers.push("Merge conflicts detected");
+ }
+ if (isBehind) {
+ blockers.push("Branch is out of date with base branch. Update to check for conflicts.");
+ }
+ if (ciStatus === "failing") {
+ blockers.push("CI checks are failing");
+ }
+
+ debugLog("checkMergeReadiness result", {
+ prNumber,
+ isDraft: pr.draft,
+ mergeable,
+ isBehind,
+ ciStatus,
+ blockers,
+ });
+
+ return {
+ isDraft: pr.draft,
+ mergeable,
+ isBehind,
+ ciStatus,
+ blockers,
+ };
+ } catch (error) {
+ debugLog("Failed to check merge readiness", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
+ return defaultResult;
+ }
+ });
+
+ return result ?? defaultResult;
+ }
+ );
+
// Run follow-up review
ipcMain.on(
IPC_CHANNELS.GITHUB_PR_FOLLOWUP_REVIEW,
async (_, projectId: string, prNumber: number) => {
- debugLog('followupReview handler called', { projectId, prNumber });
+ debugLog("followupReview handler called", { projectId, prNumber });
const mainWindow = getMainWindow();
if (!mainWindow) {
- debugLog('No main window available');
+ debugLog("No main window available");
return;
}
try {
await withProjectOrNull(projectId, async (project) => {
- const { sendProgress, sendError, sendComplete } = createIPCCommunicators(
+ const { sendProgress, sendError, sendComplete } = createIPCCommunicators<
+ PRReviewProgress,
+ PRReviewResult
+ >(
mainWindow,
{
progress: IPC_CHANNELS.GITHUB_PR_REVIEW_PROGRESS,
@@ -1451,7 +1956,7 @@ export function registerPRHandlers(
// Comprehensive validation of GitHub module
const validation = await validateGitHubModule(project);
if (!validation.valid) {
- sendError({ prNumber, error: validation.error || 'GitHub module validation failed' });
+ sendError({ prNumber, error: validation.error || "GitHub module validation failed" });
return;
}
@@ -1460,39 +1965,36 @@ export function registerPRHandlers(
// Check if already running
if (runningReviews.has(reviewKey)) {
- debugLog('Follow-up review already running', { reviewKey });
+ debugLog("Follow-up review already running", { reviewKey });
return;
}
- debugLog('Starting follow-up review', { prNumber });
+ debugLog("Starting follow-up review", { prNumber });
sendProgress({
- phase: 'fetching',
+ phase: "fetching",
prNumber,
progress: 5,
- message: 'Starting follow-up review...',
+ message: "Starting follow-up review...",
});
const { model, thinkingLevel } = getGitHubPRSettings();
const args = buildRunnerArgs(
getRunnerPath(backendPath),
project.path,
- 'followup-review-pr',
+ "followup-review-pr",
[prNumber.toString()],
{ model, thinkingLevel }
);
- debugLog('Spawning follow-up review process', { args, model, thinkingLevel });
+ debugLog("Spawning follow-up review process", { args, model, thinkingLevel });
// Create log collector for this follow-up review
const config = getGitHubConfig(project);
- const repo = config?.repo || project.name || 'unknown';
+ const repo = config?.repo || project.name || "unknown";
const logCollector = new PRLogCollector(project, prNumber, repo, true);
// Build environment with project settings
- const followupEnv: Record<string, string> = {};
- if (project.settings?.useClaudeMd !== false) {
- followupEnv['USE_CLAUDE_MD'] = 'true';
- }
+ const followupEnv = await getRunnerEnv(getClaudeMdEnv(project));
const { process: childProcess, promise } = runPythonSubprocess({
pythonPath: getPythonPath(backendPath),
@@ -1500,34 +2002,36 @@ export function registerPRHandlers(
cwd: backendPath,
env: followupEnv,
onProgress: (percent, message) => {
- debugLog('Progress update', { percent, message });
+ debugLog("Progress update", { percent, message });
sendProgress({
- phase: 'analyzing',
+ phase: "analyzing",
prNumber,
progress: percent,
message,
});
},
onStdout: (line) => {
- debugLog('STDOUT:', line);
+ debugLog("STDOUT:", line);
// Collect log entries
logCollector.processLine(line);
},
- onStderr: (line) => debugLog('STDERR:', line),
+ onStderr: (line) => debugLog("STDERR:", line),
onComplete: () => {
// Load the result from disk
const reviewResult = getReviewResult(project, prNumber);
if (!reviewResult) {
- throw new Error('Follow-up review completed but result not found');
+ throw new Error("Follow-up review completed but result not found");
}
- debugLog('Follow-up review result loaded', { findingsCount: reviewResult.findings.length });
+ debugLog("Follow-up review result loaded", {
+ findingsCount: reviewResult.findings.length,
+ });
return reviewResult;
},
});
// Register the running process
runningReviews.set(reviewKey, childProcess);
- debugLog('Registered follow-up review process', { reviewKey, pid: childProcess.pid });
+ debugLog("Registered follow-up review process", { reviewKey, pid: childProcess.pid });
try {
const result = await promise;
@@ -1535,28 +2039,39 @@ export function registerPRHandlers(
if (!result.success) {
// Finalize logs with failure
logCollector.finalize(false);
- throw new Error(result.error ?? 'Follow-up review failed');
+ throw new Error(result.error ?? "Follow-up review failed");
}
// Finalize logs with success
logCollector.finalize(true);
- debugLog('Follow-up review completed', { prNumber, findingsCount: result.data?.findings.length });
+ // Save follow-up PR review insights to memory (async, non-blocking)
+ savePRReviewToMemory(result.data!, repo, true).catch((err) => {
+ debugLog("Failed to save follow-up PR review to memory", { error: err.message });
+ });
+
+ debugLog("Follow-up review completed", {
+ prNumber,
+ findingsCount: result.data?.findings.length,
+ });
sendProgress({
- phase: 'complete',
+ phase: "complete",
prNumber,
progress: 100,
- message: 'Follow-up review complete!',
+ message: "Follow-up review complete!",
});
sendComplete(result.data!);
} finally {
runningReviews.delete(reviewKey);
- debugLog('Unregistered follow-up review process', { reviewKey });
+ debugLog("Unregistered follow-up review process", { reviewKey });
}
});
} catch (error) {
- debugLog('Follow-up review failed', { prNumber, error: error instanceof Error ? error.message : error });
+ debugLog("Follow-up review failed", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
const { sendError } = createIPCCommunicators(
mainWindow,
{
@@ -1566,10 +2081,265 @@ export function registerPRHandlers(
},
projectId
);
- sendError({ prNumber, error: error instanceof Error ? error.message : 'Failed to run follow-up review' });
+ sendError({
+ prNumber,
+ error: error instanceof Error ? error.message : "Failed to run follow-up review",
+ });
}
}
);
- debugLog('PR handlers registered');
+ // Get workflows awaiting approval for a PR (fork PRs)
+ ipcMain.handle(
+ IPC_CHANNELS.GITHUB_WORKFLOWS_AWAITING_APPROVAL,
+ async (
+ _,
+ projectId: string,
+ prNumber: number
+ ): Promise<{
+ awaiting_approval: number;
+ workflow_runs: Array<{ id: number; name: string; html_url: string; workflow_name: string }>;
+ can_approve: boolean;
+ error?: string;
+ }> => {
+ debugLog("getWorkflowsAwaitingApproval handler called", { projectId, prNumber });
+ const result = await withProjectOrNull(projectId, async (project) => {
+ const config = getGitHubConfig(project);
+ if (!config) {
+ return {
+ awaiting_approval: 0,
+ workflow_runs: [],
+ can_approve: false,
+ error: "No GitHub config",
+ };
+ }
+
+ try {
+ // First get the PR's head SHA
+ const prData = (await githubFetch(
+ config.token,
+ `/repos/${config.repo}/pulls/${prNumber}`
+ )) as { head?: { sha?: string } };
+
+ const headSha = prData?.head?.sha;
+ if (!headSha) {
+ return { awaiting_approval: 0, workflow_runs: [], can_approve: false };
+ }
+
+ // Query workflow runs with action_required status
+ const runsData = (await githubFetch(
+ config.token,
+ `/repos/${config.repo}/actions/runs?status=action_required&per_page=100`
+ )) as {
+ workflow_runs?: Array<{
+ id: number;
+ name: string;
+ html_url: string;
+ head_sha: string;
+ workflow?: { name?: string };
+ }>;
+ };
+
+ const allRuns = runsData?.workflow_runs || [];
+
+ // Filter to only runs for this PR's head SHA
+ const prRuns = allRuns
+ .filter((run) => run.head_sha === headSha)
+ .map((run) => ({
+ id: run.id,
+ name: run.name,
+ html_url: run.html_url,
+ workflow_name: run.workflow?.name || "Unknown",
+ }));
+
+ debugLog("Found workflows awaiting approval", { prNumber, count: prRuns.length });
+
+ return {
+ awaiting_approval: prRuns.length,
+ workflow_runs: prRuns,
+ can_approve: true, // Assume token has permission; will fail if not
+ };
+ } catch (error) {
+ debugLog("Failed to get workflows awaiting approval", {
+ prNumber,
+ error: error instanceof Error ? error.message : error,
+ });
+ return {
+ awaiting_approval: 0,
+ workflow_runs: [],
+ can_approve: false,
+ error: error instanceof Error ? error.message : "Unknown error",
+ };
+ }
+ });
+
+ return result ?? { awaiting_approval: 0, workflow_runs: [], can_approve: false };
+ }
+ );
+
+ // Approve a workflow run
+ ipcMain.handle(
+ IPC_CHANNELS.GITHUB_WORKFLOW_APPROVE,
+ async (_, projectId: string, runId: number): Promise<boolean> => {
+ debugLog("approveWorkflow handler called", { projectId, runId });
+ const result = await withProjectOrNull(projectId, async (project) => {
+ const config = getGitHubConfig(project);
+ if (!config) {
+ debugLog("No GitHub config found");
+ return false;
+ }
+
+ try {
+ // Approve the workflow run
+ await githubFetch(config.token, `/repos/${config.repo}/actions/runs/${runId}/approve`, {
+ method: "POST",
+ });
+
+ debugLog("Workflow approved successfully", { runId });
+ return true;
+ } catch (error) {
+ debugLog("Failed to approve workflow", {
+ runId,
+ error: error instanceof Error ? error.message : error,
+ });
+ return false;
+ }
+ });
+
+ return result ?? false;
+ }
+ );
+
+ // Get PR review memories from the memory layer
+ ipcMain.handle(
+ IPC_CHANNELS.GITHUB_PR_MEMORY_GET,
+ async (_, projectId: string, limit: number = 10): Promise<PRReviewMemory[]> => {
+ debugLog("getPRReviewMemories handler called", { projectId, limit });
+ const result = await withProjectOrNull(projectId, async (project) => {
+ const memoryDir = path.join(getGitHubDir(project), "memory", project.name || "unknown");
+ const memories: PRReviewMemory[] = [];
+
+ // Try to load from file-based storage
+ try {
+ const indexPath = path.join(memoryDir, "reviews_index.json");
+ if (!fs.existsSync(indexPath)) {
+ debugLog("No PR review memories found", { projectId });
+ return [];
+ }
+
+ const indexContent = fs.readFileSync(indexPath, "utf-8");
+ const index = JSON.parse(sanitizeNetworkData(indexContent));
+ const reviews = index.reviews || [];
+
+ // Load individual review memories
+ for (const entry of reviews.slice(0, limit)) {
+ try {
+ const reviewPath = path.join(memoryDir, `pr_${entry.pr_number}_review.json`);
+ if (fs.existsSync(reviewPath)) {
+ const reviewContent = fs.readFileSync(reviewPath, "utf-8");
+ const memory = JSON.parse(sanitizeNetworkData(reviewContent));
+ memories.push({
+ prNumber: memory.pr_number,
+ repo: memory.repo,
+ verdict: memory.summary?.verdict || "unknown",
+ timestamp: memory.timestamp,
+ summary: memory.summary,
+ keyFindings: memory.key_findings || [],
+ patterns: memory.patterns || [],
+ gotchas: memory.gotchas || [],
+ isFollowup: memory.is_followup || false,
+ });
+ }
+ } catch (err) {
+ debugLog("Failed to load PR review memory", {
+ prNumber: entry.pr_number,
+ error: err instanceof Error ? err.message : err,
+ });
+ }
+ }
+
+ debugLog("Loaded PR review memories", { count: memories.length });
+ return memories;
+ } catch (error) {
+ debugLog("Failed to load PR review memories", {
+ error: error instanceof Error ? error.message : error,
+ });
+ return [];
+ }
+ });
+ return result ?? [];
+ }
+ );
+
+ // Search PR review memories
+ ipcMain.handle(
+ IPC_CHANNELS.GITHUB_PR_MEMORY_SEARCH,
+ async (_, projectId: string, query: string, limit: number = 10): Promise<PRReviewMemory[]> => {
+ debugLog("searchPRReviewMemories handler called", { projectId, query, limit });
+ const result = await withProjectOrNull(projectId, async (project) => {
+ const memoryDir = path.join(getGitHubDir(project), "memory", project.name || "unknown");
+ const memories: PRReviewMemory[] = [];
+ const queryLower = query.toLowerCase();
+
+ // Search through file-based storage
+ try {
+ const indexPath = path.join(memoryDir, "reviews_index.json");
+ if (!fs.existsSync(indexPath)) {
+ return [];
+ }
+
+ const indexContent = fs.readFileSync(indexPath, "utf-8");
+ const index = JSON.parse(sanitizeNetworkData(indexContent));
+ const reviews = index.reviews || [];
+
+ // Search individual review memories
+ for (const entry of reviews) {
+ try {
+ const reviewPath = path.join(memoryDir, `pr_${entry.pr_number}_review.json`);
+ if (fs.existsSync(reviewPath)) {
+ const reviewContent = fs.readFileSync(reviewPath, "utf-8");
+
+ // Check if content matches query
+ if (reviewContent.toLowerCase().includes(queryLower)) {
+ const memory = JSON.parse(sanitizeNetworkData(reviewContent));
+ memories.push({
+ prNumber: memory.pr_number,
+ repo: memory.repo,
+ verdict: memory.summary?.verdict || "unknown",
+ timestamp: memory.timestamp,
+ summary: memory.summary,
+ keyFindings: memory.key_findings || [],
+ patterns: memory.patterns || [],
+ gotchas: memory.gotchas || [],
+ isFollowup: memory.is_followup || false,
+ });
+ }
+ }
+
+ // Stop if we have enough
+ if (memories.length >= limit) {
+ break;
+ }
+ } catch (err) {
+ debugLog("Failed to search PR review memory", {
+ prNumber: entry.pr_number,
+ error: err instanceof Error ? err.message : err,
+ });
+ }
+ }
+
+ debugLog("Found matching PR review memories", { count: memories.length, query });
+ return memories;
+ } catch (error) {
+ debugLog("Failed to search PR review memories", {
+ error: error instanceof Error ? error.message : error,
+ });
+ return [];
+ }
+ });
+ return result ?? [];
+ }
+ );
+
+ debugLog("PR handlers registered");
}
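A minimal sketch (illustration only, not part of the patch) of how a renderer-side caller might consume the new GITHUB_PR_GET_REVIEWS_BATCH channel registered above; the preload/bridge wiring, the import path, and the result typing are assumptions:

// Hypothetical renderer-side helper (TypeScript)
import { ipcRenderer } from "electron";
import { IPC_CHANNELS } from "../shared/constants"; // assumed path

async function loadSavedReviews(
  projectId: string,
  prNumbers: number[]
): Promise<Record<number, unknown | null>> {
  // One round-trip instead of one call per PR; PRs without a saved review come back as null.
  return ipcRenderer.invoke(IPC_CHANNELS.GITHUB_PR_GET_REVIEWS_BATCH, projectId, prNumbers);
}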
diff --git a/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts b/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts
index b233f59bb1..7e71b12640 100644
--- a/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/spec-utils.ts
@@ -8,6 +8,7 @@ import { AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants';
import type { Project, TaskMetadata } from '../../../shared/types';
import { withSpecNumberLock } from '../../utils/spec-number-lock';
import { debugLog } from './utils/logger';
+import { labelMatchesWholeWord } from '../shared/label-utils';
export interface SpecCreationData {
specId: string;
@@ -55,7 +56,14 @@ function determineCategoryFromLabels(labels: string[]): 'feature' | 'bug_fix' |
}
// Check for infrastructure labels
- if (lowerLabels.some(l => l.includes('infrastructure') || l.includes('devops') || l.includes('deployment') || l.includes('ci') || l.includes('cd'))) {
+ // Use whole-word matching for 'ci' and 'cd' to avoid false positives like 'acid' or 'decide'
+ if (lowerLabels.some(l =>
+ l.includes('infrastructure') ||
+ l.includes('devops') ||
+ l.includes('deployment') ||
+ labelMatchesWholeWord(l, 'ci') ||
+ labelMatchesWholeWord(l, 'cd')
+ )) {
return 'infrastructure';
}
@@ -89,7 +97,8 @@ export async function createSpecForIssue(
issueTitle: string,
taskDescription: string,
githubUrl: string,
- labels: string[] = []
+ labels: string[] = [],
+ baseBranch?: string
): Promise {
const specsBaseDir = getSpecsDir(project.autoBuildPath);
const specsDir = path.join(project.path, specsBaseDir);
@@ -144,7 +153,10 @@ export async function createSpecForIssue(
sourceType: 'github',
githubIssueNumber: issueNumber,
githubUrl,
- category
+ category,
+ // Store baseBranch for worktree creation and QA comparison
+ // This comes from project.settings.mainBranch or task-level override
+ ...(baseBranch && { baseBranch })
};
writeFileSync(
path.join(specDir, 'task_metadata.json'),
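A minimal sketch (illustration only, not part of the patch) of the whole-word behaviour the imported labelMatchesWholeWord helper is relied on for; the real implementation in ../shared/label-utils is not shown in this diff, so the regex and escaping below are assumptions:

// Hypothetical sketch: matches "ci", "ci/cd", "needs ci"; rejects "acid" and "decide".
function labelMatchesWholeWord(label: string, word: string): boolean {
  const escaped = word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // escape regex metacharacters
  return new RegExp(`(^|[^a-z0-9])${escaped}([^a-z0-9]|$)`, "i").test(label);
}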
diff --git a/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts b/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts
index 7e0f960be5..a84e44a79c 100644
--- a/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/triage-handlers.ts
@@ -19,6 +19,7 @@ import type { Project, AppSettings } from '../../../shared/types';
import { createContextLogger } from './utils/logger';
import { withProjectOrNull } from './utils/project-middleware';
import { createIPCCommunicators } from './utils/ipc-communicator';
+import { getRunnerEnv } from './utils/runner-env';
import {
runPythonSubprocess,
getPythonPath,
@@ -254,10 +255,13 @@ async function runTriage(
debugLog('Spawning triage process', { args, model, thinkingLevel });
+ const subprocessEnv = await getRunnerEnv();
+
const { promise } = runPythonSubprocess({
pythonPath: getPythonPath(backendPath),
args,
cwd: backendPath,
+ env: subprocessEnv,
onProgress: (percent, message) => {
debugLog('Progress update', { percent, message });
sendProgress({
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts b/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts
new file mode 100644
index 0000000000..0ffd9fa29d
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/__tests__/runner-env.test.ts
@@ -0,0 +1,122 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+
+const mockGetAPIProfileEnv = vi.fn();
+const mockGetOAuthModeClearVars = vi.fn();
+const mockGetPythonEnv = vi.fn();
+const mockGetProfileEnv = vi.fn();
+
+vi.mock('../../../../services/profile', () => ({
+ getAPIProfileEnv: (...args: unknown[]) => mockGetAPIProfileEnv(...args),
+}));
+
+vi.mock('../../../../agent/env-utils', () => ({
+ getOAuthModeClearVars: (...args: unknown[]) => mockGetOAuthModeClearVars(...args),
+}));
+
+vi.mock('../../../../python-env-manager', () => ({
+ pythonEnvManager: {
+ getPythonEnv: () => mockGetPythonEnv(),
+ },
+}));
+
+vi.mock('../../../../rate-limit-detector', () => ({
+ getProfileEnv: () => mockGetProfileEnv(),
+}));
+
+import { getRunnerEnv } from '../runner-env';
+
+describe('getRunnerEnv', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ // Default mock for Python env - minimal env for testing
+ mockGetPythonEnv.mockReturnValue({
+ PYTHONDONTWRITEBYTECODE: '1',
+ PYTHONIOENCODING: 'utf-8',
+ PYTHONNOUSERSITE: '1',
+ PYTHONPATH: '/bundled/site-packages',
+ });
+ // Default mock for profile env - returns empty by default
+ mockGetProfileEnv.mockReturnValue({});
+ });
+
+ it('merges Python env with API profile env and OAuth clear vars', async () => {
+ mockGetAPIProfileEnv.mockResolvedValue({
+ ANTHROPIC_AUTH_TOKEN: 'token',
+ ANTHROPIC_BASE_URL: 'https://api.example.com',
+ });
+ mockGetOAuthModeClearVars.mockReturnValue({
+ ANTHROPIC_AUTH_TOKEN: '',
+ });
+
+ const result = await getRunnerEnv();
+
+ expect(mockGetOAuthModeClearVars).toHaveBeenCalledWith({
+ ANTHROPIC_AUTH_TOKEN: 'token',
+ ANTHROPIC_BASE_URL: 'https://api.example.com',
+ });
+ // Python env is included first, then overridden by OAuth clear vars
+ expect(result).toMatchObject({
+ PYTHONPATH: '/bundled/site-packages',
+ PYTHONDONTWRITEBYTECODE: '1',
+ ANTHROPIC_AUTH_TOKEN: '',
+ ANTHROPIC_BASE_URL: 'https://api.example.com',
+ });
+ });
+
+ it('includes extra env values with highest precedence', async () => {
+ mockGetAPIProfileEnv.mockResolvedValue({
+ ANTHROPIC_AUTH_TOKEN: 'token',
+ });
+ mockGetOAuthModeClearVars.mockReturnValue({});
+
+ const result = await getRunnerEnv({ USE_CLAUDE_MD: 'true' });
+
+ expect(result).toMatchObject({
+ PYTHONPATH: '/bundled/site-packages',
+ ANTHROPIC_AUTH_TOKEN: 'token',
+ USE_CLAUDE_MD: 'true',
+ });
+ });
+
+ it('includes PYTHONPATH for bundled packages (fixes #139)', async () => {
+ mockGetAPIProfileEnv.mockResolvedValue({});
+ mockGetOAuthModeClearVars.mockReturnValue({});
+ mockGetPythonEnv.mockReturnValue({
+ PYTHONPATH: '/app/Contents/Resources/python-site-packages',
+ });
+
+ const result = await getRunnerEnv();
+
+ expect(result.PYTHONPATH).toBe('/app/Contents/Resources/python-site-packages');
+ });
+
+ it('includes profileEnv for OAuth token (fixes #563)', async () => {
+ mockGetAPIProfileEnv.mockResolvedValue({});
+ mockGetOAuthModeClearVars.mockReturnValue({});
+ mockGetProfileEnv.mockReturnValue({
+ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token-123',
+ });
+
+ const result = await getRunnerEnv();
+
+ expect(result.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token-123');
+ });
+
+ it('applies correct precedence order with profileEnv overriding pythonEnv', async () => {
+ mockGetPythonEnv.mockReturnValue({
+ SHARED_VAR: 'from-python',
+ });
+ mockGetAPIProfileEnv.mockResolvedValue({
+ SHARED_VAR: 'from-api-profile',
+ });
+ mockGetOAuthModeClearVars.mockReturnValue({});
+ mockGetProfileEnv.mockReturnValue({
+ SHARED_VAR: 'from-profile',
+ });
+
+ const result = await getRunnerEnv({ SHARED_VAR: 'from-extra' });
+
+ // extraEnv has highest precedence
+ expect(result.SHARED_VAR).toBe('from-extra');
+ });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts b/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts
new file mode 100644
index 0000000000..ace24490bc
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/runner-env.ts
@@ -0,0 +1,38 @@
+import { getOAuthModeClearVars } from '../../../agent/env-utils';
+import { getAPIProfileEnv } from '../../../services/profile';
+import { getProfileEnv } from '../../../rate-limit-detector';
+import { pythonEnvManager } from '../../../python-env-manager';
+
+/**
+ * Get environment variables for Python runner subprocesses.
+ *
+ * Environment variable precedence (lowest to highest):
+ * 1. pythonEnv - Python environment including PYTHONPATH for bundled packages (fixes #139)
+ * 2. apiProfileEnv - Custom Anthropic-compatible API profile (ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN)
+ * 3. oauthModeClearVars - Clears stale ANTHROPIC_* vars when in OAuth mode
+ * 4. profileEnv - Claude OAuth token from profile manager (CLAUDE_CODE_OAUTH_TOKEN)
+ * 5. extraEnv - Caller-specific vars (e.g., USE_CLAUDE_MD)
+ *
+ * The pythonEnv is critical for packaged apps (#139) - without PYTHONPATH, Python
+ * cannot find bundled dependencies like dotenv, claude_agent_sdk, etc.
+ *
+ * The profileEnv is critical for OAuth authentication (#563) - it retrieves the
+ * decrypted OAuth token from the profile manager's encrypted storage (macOS Keychain
+ * via Electron's safeStorage API).
+ */
+export async function getRunnerEnv(
+  extraEnv?: Record<string, string>
+): Promise<Record<string, string>> {
+ const pythonEnv = pythonEnvManager.getPythonEnv();
+ const apiProfileEnv = await getAPIProfileEnv();
+ const oauthModeClearVars = getOAuthModeClearVars(apiProfileEnv);
+ const profileEnv = getProfileEnv();
+
+ return {
+ ...pythonEnv, // Python environment including PYTHONPATH (fixes #139)
+ ...apiProfileEnv,
+ ...oauthModeClearVars,
+ ...profileEnv, // OAuth token from profile manager (fixes #563)
+ ...extraEnv,
+ };
+}
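
A minimal standalone sketch of the spread merge above, with hypothetical values, illustrates how later sources win on conflicting keys:

```ts
// Sketch of the spread-merge precedence in getRunnerEnv(); values are hypothetical.
const pythonEnv = { PYTHONPATH: '/bundled/site-packages', SHARED: 'python' };
const apiProfileEnv = { ANTHROPIC_AUTH_TOKEN: 'api-token', SHARED: 'api' };
const oauthModeClearVars = { ANTHROPIC_AUTH_TOKEN: '' }; // stale API token cleared in OAuth mode
const profileEnv = { CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token', SHARED: 'profile' };
const extraEnv = { USE_CLAUDE_MD: 'true' };

const merged = { ...pythonEnv, ...apiProfileEnv, ...oauthModeClearVars, ...profileEnv, ...extraEnv };
// merged.SHARED === 'profile'            (profileEnv overrides pythonEnv and apiProfileEnv)
// merged.ANTHROPIC_AUTH_TOKEN === ''     (OAuth clear vars override the API profile token)
// merged.USE_CLAUDE_MD === 'true'        (extraEnv has the highest precedence)
// merged.PYTHONPATH === '/bundled/site-packages' (bundled packages stay importable, #139)
```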
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
index 8fe079820b..8e22bc2863 100644
--- a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.test.ts
@@ -4,11 +4,15 @@ import { runPythonSubprocess } from './subprocess-runner';
import * as childProcess from 'child_process';
import EventEmitter from 'events';
-// Mock child_process.spawn
-vi.mock('child_process', () => ({
- spawn: vi.fn(),
- exec: vi.fn(),
-}));
+// Mock child_process with importOriginal to preserve all exports
+vi.mock('child_process', async (importOriginal) => {
+ const actual = await importOriginal();
+ return {
+ ...actual,
+ spawn: vi.fn(),
+ exec: vi.fn(),
+ };
+});
// Mock parsePythonCommand
vi.mock('../../../python-detector', () => ({
@@ -45,12 +49,12 @@ describe('runPythonSubprocess', () => {
// Arrange
const pythonPath = '/path/with spaces/python';
const mockArgs = ['-c', 'print("hello")'];
-
- // Mock parsePythonCommand to return the path split logic if needed,
- // or just rely on the mock above.
+
+ // Mock parsePythonCommand to return the path split logic if needed,
+ // or just rely on the mock above.
// Let's make sure our mock enables the scenario we want.
vi.mocked(parsePythonCommand).mockReturnValue(['/path/with spaces/python', []]);
-
+
// Act
runPythonSubprocess({
pythonPath,
@@ -72,7 +76,7 @@ describe('runPythonSubprocess', () => {
const pythonPath = 'python';
const pythonBaseArgs = ['-u', '-X', 'utf8'];
const userArgs = ['script.py', '--verbose'];
-
+
// Setup mock to simulate what parsePythonCommand would return for a standard python path
vi.mocked(parsePythonCommand).mockReturnValue(['python', pythonBaseArgs]);
@@ -87,11 +91,126 @@ describe('runPythonSubprocess', () => {
// The critical check: verify the ORDER of arguments in the second parameter of spawn
// expect call to be: spawn('python', ['-u', '-X', 'utf8', 'script.py', '--verbose'], ...)
const expectedArgs = [...pythonBaseArgs, ...userArgs];
-
+
expect(mockSpawn).toHaveBeenCalledWith(
expect.any(String),
expectedArgs, // Exact array match verifies order
expect.any(Object)
);
});
+
+ describe('environment handling', () => {
+ it('should use caller-provided env directly when options.env is set', () => {
+ // Arrange
+ const customEnv = {
+ PATH: '/custom/path',
+ PYTHONPATH: '/custom/pythonpath',
+ ANTHROPIC_AUTH_TOKEN: 'custom-token',
+ };
+ vi.mocked(parsePythonCommand).mockReturnValue(['python', []]);
+
+ // Act
+ runPythonSubprocess({
+ pythonPath: 'python',
+ args: ['script.py'],
+ cwd: '/tmp',
+ env: customEnv,
+ });
+
+ // Assert - should use the exact env provided
+ expect(mockSpawn).toHaveBeenCalledWith(
+ expect.any(String),
+ expect.any(Array),
+ expect.objectContaining({
+ env: customEnv,
+ })
+ );
+ });
+
+ it('should create fallback env when options.env is not provided', () => {
+ // Arrange
+ const originalEnv = process.env;
+ try {
+ process.env = {
+ PATH: '/usr/bin',
+ HOME: '/home/user',
+ USER: 'testuser',
+ SHELL: '/bin/bash',
+ LANG: 'en_US.UTF-8',
+ CLAUDE_CODE_OAUTH_TOKEN: 'oauth-token',
+ ANTHROPIC_API_KEY: 'api-key',
+ SENSITIVE_VAR: 'should-not-leak',
+ };
+
+ vi.mocked(parsePythonCommand).mockReturnValue(['python', []]);
+
+ // Act
+ runPythonSubprocess({
+ pythonPath: 'python',
+ args: ['script.py'],
+ cwd: '/tmp',
+ // No env provided - should use fallback
+ });
+
+ // Assert - should only include safe vars
+ const spawnCall = mockSpawn.mock.calls[0];
+ const envArg = spawnCall[2].env;
+
+ // Safe vars should be included
+ expect(envArg.PATH).toBe('/usr/bin');
+ expect(envArg.HOME).toBe('/home/user');
+ expect(envArg.USER).toBe('testuser');
+
+ // CLAUDE_ and ANTHROPIC_ prefixed vars should be included
+ expect(envArg.CLAUDE_CODE_OAUTH_TOKEN).toBe('oauth-token');
+ expect(envArg.ANTHROPIC_API_KEY).toBe('api-key');
+
+ // Sensitive vars should NOT be included
+ expect(envArg.SENSITIVE_VAR).toBeUndefined();
+ } finally {
+ // Restore - always runs even if assertions fail
+ process.env = originalEnv;
+ }
+ });
+
+ it('fallback env should include platform-specific vars on Windows', () => {
+ // Arrange
+ const originalEnv = process.env;
+ try {
+ process.env = {
+ PATH: 'C:\\Windows\\System32',
+ SYSTEMROOT: 'C:\\Windows',
+ COMSPEC: 'C:\\Windows\\System32\\cmd.exe',
+ PATHEXT: '.COM;.EXE;.BAT',
+ WINDIR: 'C:\\Windows',
+ USERPROFILE: 'C:\\Users\\test',
+ APPDATA: 'C:\\Users\\test\\AppData\\Roaming',
+ LOCALAPPDATA: 'C:\\Users\\test\\AppData\\Local',
+ };
+
+ vi.mocked(parsePythonCommand).mockReturnValue(['python', []]);
+
+ // Act
+ runPythonSubprocess({
+ pythonPath: 'python',
+ args: ['script.py'],
+ cwd: '/tmp',
+ // No env provided - should use fallback
+ });
+
+ // Assert - Windows-specific vars should be included
+ const spawnCall = mockSpawn.mock.calls[0];
+ const envArg = spawnCall[2].env;
+
+ expect(envArg.SYSTEMROOT).toBe('C:\\Windows');
+ expect(envArg.COMSPEC).toBe('C:\\Windows\\System32\\cmd.exe');
+ expect(envArg.PATHEXT).toBe('.COM;.EXE;.BAT');
+ expect(envArg.USERPROFILE).toBe('C:\\Users\\test');
+ expect(envArg.APPDATA).toBe('C:\\Users\\test\\AppData\\Roaming');
+ } finally {
+ // Restore - always runs even if assertions fail
+ process.env = originalEnv;
+ }
+ });
+ });
});
diff --git a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts
index db6ae7dc0e..5b1700cf1b 100644
--- a/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts
+++ b/apps/frontend/src/main/ipc-handlers/github/utils/subprocess-runner.ts
@@ -15,6 +15,36 @@ import { parsePythonCommand } from '../../../python-detector';
const execAsync = promisify(exec);
+/**
+ * Create a fallback environment for Python subprocesses when no env is provided.
+ * This is used for backwards compatibility when callers don't use getRunnerEnv().
+ *
+ * Includes:
+ * - Platform-specific vars needed for shell commands and CLI tools
+ * - CLAUDE_ and ANTHROPIC_ prefixed vars for authentication
+ */
+function createFallbackRunnerEnv(): Record<string, string> {
+ // Include platform-specific vars needed for shell commands and CLI tools
+ // Windows: SYSTEMROOT, COMSPEC, PATHEXT, WINDIR for shell; USERPROFILE, APPDATA, LOCALAPPDATA for gh CLI auth
+ const safeEnvVars = ['PATH', 'HOME', 'USER', 'SHELL', 'LANG', 'LC_ALL', 'TERM', 'TMPDIR', 'TMP', 'TEMP', 'DEBUG', 'SYSTEMROOT', 'COMSPEC', 'PATHEXT', 'WINDIR', 'USERPROFILE', 'APPDATA', 'LOCALAPPDATA', 'HOMEDRIVE', 'HOMEPATH'];
+  const fallbackEnv: Record<string, string> = {};
+
+ for (const key of safeEnvVars) {
+ if (process.env[key]) {
+ fallbackEnv[key] = process.env[key]!;
+ }
+ }
+
+ // Also include any CLAUDE_ or ANTHROPIC_ prefixed vars needed for auth
+ for (const [key, value] of Object.entries(process.env)) {
+ if ((key.startsWith('CLAUDE_') || key.startsWith('ANTHROPIC_')) && value) {
+ fallbackEnv[key] = value;
+ }
+ }
+
+ return fallbackEnv;
+}
+
/**
* Options for running a Python subprocess
*/
@@ -54,41 +84,30 @@ export interface SubprocessResult {
export function runPythonSubprocess(
options: SubprocessOptions
): { process: ChildProcess; promise: Promise<SubprocessResult> } {
- // Don't set PYTHONPATH - let runner.py manage its own import paths
- // Setting PYTHONPATH can interfere with runner.py's sys.path manipulation
- // Filter environment variables to only include necessary ones (prevent leaking secrets)
+ // Use the environment provided by the caller (from getRunnerEnv()).
+ // getRunnerEnv() provides:
+ // - pythonEnvManager.getPythonEnv() which includes PYTHONPATH for bundled packages (fixes #139)
+ // - API profile environment (ANTHROPIC_BASE_URL, ANTHROPIC_AUTH_TOKEN)
+ // - OAuth mode clearing vars
+ // - Claude OAuth token (CLAUDE_CODE_OAUTH_TOKEN)
+ //
+ // If no env is provided, fall back to filtered process.env for backwards compatibility.
// Note: DEBUG is included for PR review debugging (shows LLM thinking blocks).
- // This is safe because: (1) user must explicitly enable via npm run dev:debug,
- // (2) it only enables our internal debug logging, not third-party framework debugging,
- // (3) no sensitive values are logged - only LLM reasoning and response text.
- // Include platform-specific vars needed for shell commands and CLI tools
- // Windows: SYSTEMROOT, COMSPEC, PATHEXT, WINDIR for shell; USERPROFILE, APPDATA, LOCALAPPDATA for gh CLI auth
- const safeEnvVars = ['PATH', 'HOME', 'USER', 'SHELL', 'LANG', 'LC_ALL', 'TERM', 'TMPDIR', 'TMP', 'TEMP', 'DEBUG', 'SYSTEMROOT', 'COMSPEC', 'PATHEXT', 'WINDIR', 'USERPROFILE', 'APPDATA', 'LOCALAPPDATA', 'HOMEDRIVE', 'HOMEPATH'];
-  const filteredEnv: Record<string, string> = {};
- for (const key of safeEnvVars) {
- if (process.env[key]) {
- filteredEnv[key] = process.env[key]!;
- }
- }
- // Also include any CLAUDE_ or ANTHROPIC_ prefixed vars needed for auth
- for (const [key, value] of Object.entries(process.env)) {
- if ((key.startsWith('CLAUDE_') || key.startsWith('ANTHROPIC_')) && value) {
- filteredEnv[key] = value;
- }
- }
+  let subprocessEnv: Record<string, string>;
- // Merge in any additional env vars passed by the caller (e.g., USE_CLAUDE_MD)
if (options.env) {
- for (const [key, value] of Object.entries(options.env)) {
- filteredEnv[key] = value;
- }
+ // Caller provided a complete environment (from getRunnerEnv()), use it directly
+ subprocessEnv = { ...options.env };
+ } else {
+ // Fallback: build a filtered environment for backwards compatibility
+ subprocessEnv = createFallbackRunnerEnv();
}
// Parse Python command to handle paths with spaces (e.g., ~/Library/Application Support/...)
const [pythonCommand, pythonBaseArgs] = parsePythonCommand(options.pythonPath);
const child = spawn(pythonCommand, [...pythonBaseArgs, ...options.args], {
cwd: options.cwd,
- env: filteredEnv,
+ env: subprocessEnv,
});
const promise = new Promise<SubprocessResult>((resolve) => {
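
For illustration, a hedged sketch of how a caller combines the two utilities, mirroring the mr-review-handlers change below; the backend path and runner arguments are placeholders rather than the real CLI contract, and the relative imports assume a module in the same utils directory:

```ts
import { getRunnerEnv } from './runner-env';
import { runPythonSubprocess, getPythonPath } from './subprocess-runner';

// Sketch only: the args and backendPath value are placeholders.
async function runExampleRunner(backendPath: string): Promise<void> {
  // Build the complete environment up front (PYTHONPATH, API profile, OAuth token).
  const env = await getRunnerEnv({ USE_CLAUDE_MD: 'true' });

  const { promise } = runPythonSubprocess({
    pythonPath: getPythonPath(backendPath),
    args: ['runner.py', '--mode', 'example'],
    cwd: backendPath,
    env, // a caller-provided env is used as-is; the filtered fallback only applies when env is omitted
  });

  await promise;
}
```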
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts
index eea6215d90..7b343efb27 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/import-handlers.ts
@@ -63,7 +63,7 @@ export function registerImportIssues(): void {
) as GitLabAPIIssue;
// Create a spec/task from the issue
- const task = await createSpecForIssue(project, apiIssue, config);
+ const task = await createSpecForIssue(project, apiIssue, config, project.settings?.mainBranch);
if (task) {
tasks.push(task);
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts
index 20b1a422cd..f383f03204 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/investigation-handlers.ts
@@ -158,7 +158,7 @@ export function registerInvestigateIssue(
});
// Create spec for the issue
- const task = await createSpecForIssue(project, issue, config);
+ const task = await createSpecForIssue(project, issue, config, project.settings?.mainBranch);
if (!task) {
sendError(getMainWindow, project.id, 'Failed to create task from issue');
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts b/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts
index 62cb9e0e8e..b4c310804d 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/mr-review-handlers.ts
@@ -33,6 +33,7 @@ import {
getPythonPath,
buildRunnerArgs,
} from '../github/utils/subprocess-runner';
+import { getRunnerEnv } from '../github/utils/runner-env';
/**
* Get the GitLab runner path
@@ -216,10 +217,14 @@ async function runMRReview(
debugLog('Spawning MR review process', { args, model, thinkingLevel });
+ // Get runner environment with PYTHONPATH for bundled packages (fixes #139)
+ const subprocessEnv = await getRunnerEnv();
+
const { process: childProcess, promise } = runPythonSubprocess({
pythonPath: getPythonPath(backendPath),
args,
cwd: backendPath,
+ env: subprocessEnv,
onProgress: (percent, message) => {
debugLog('Progress update', { percent, message });
sendProgress({
@@ -821,10 +826,14 @@ export function registerMRReviewHandlers(
debugLog('Spawning follow-up review process', { args, model, thinkingLevel });
+ // Get runner environment with PYTHONPATH for bundled packages (fixes #139)
+ const followupSubprocessEnv = await getRunnerEnv();
+
const { process: childProcess, promise } = runPythonSubprocess({
pythonPath: getPythonPath(backendPath),
args,
cwd: backendPath,
+ env: followupSubprocessEnv,
onProgress: (percent, message) => {
debugLog('Progress update', { percent, message });
sendProgress({
diff --git a/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts b/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts
index a8830ca320..c624a63f70 100644
--- a/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/gitlab/spec-utils.ts
@@ -7,6 +7,7 @@ import { mkdir, writeFile, readFile, stat } from 'fs/promises';
import path from 'path';
import type { Project } from '../../../shared/types';
import type { GitLabAPIIssue, GitLabConfig } from './types';
+import { labelMatchesWholeWord } from '../shared/label-utils';
/**
* Simplified task info returned when creating a spec from a GitLab issue.
@@ -60,6 +61,47 @@ function debugLog(message: string, data?: unknown): void {
}
}
+/**
+ * Determine task category based on GitLab issue labels
+ * Maps to TaskCategory type from shared/types/task.ts
+ */
+function determineCategoryFromLabels(labels: string[]): 'feature' | 'bug_fix' | 'refactoring' | 'documentation' | 'security' | 'performance' | 'ui_ux' | 'infrastructure' | 'testing' {
+ const lowerLabels = labels.map(l => l.toLowerCase());
+
+ if (lowerLabels.some(l => l.includes('bug') || l.includes('defect') || l.includes('error') || l.includes('fix'))) {
+ return 'bug_fix';
+ }
+ if (lowerLabels.some(l => l.includes('security') || l.includes('vulnerability') || l.includes('cve'))) {
+ return 'security';
+ }
+ if (lowerLabels.some(l => l.includes('performance') || l.includes('optimization') || l.includes('speed'))) {
+ return 'performance';
+ }
+ if (lowerLabels.some(l => l.includes('ui') || l.includes('ux') || l.includes('design') || l.includes('styling'))) {
+ return 'ui_ux';
+ }
+ // Use whole-word matching for 'ci' and 'cd' to avoid false positives like 'acid' or 'decide'
+ if (lowerLabels.some(l =>
+ l.includes('infrastructure') ||
+ l.includes('devops') ||
+ l.includes('deployment') ||
+ labelMatchesWholeWord(l, 'ci') ||
+ labelMatchesWholeWord(l, 'cd')
+ )) {
+ return 'infrastructure';
+ }
+ if (lowerLabels.some(l => l.includes('test') || l.includes('testing') || l.includes('qa'))) {
+ return 'testing';
+ }
+ if (lowerLabels.some(l => l.includes('refactor') || l.includes('cleanup') || l.includes('maintenance') || l.includes('chore') || l.includes('tech-debt') || l.includes('technical debt'))) {
+ return 'refactoring';
+ }
+ if (lowerLabels.some(l => l.includes('documentation') || l.includes('docs'))) {
+ return 'documentation';
+ }
+ return 'feature';
+}
+
function stripControlChars(value: string, allowNewlines: boolean): string {
let sanitized = '';
for (let i = 0; i < value.length; i += 1) {
@@ -258,7 +300,8 @@ async function pathExists(filePath: string): Promise<boolean> {
export async function createSpecForIssue(
project: Project,
issue: GitLabAPIIssue,
- config: GitLabConfig
+ config: GitLabConfig,
+ baseBranch?: string
): Promise {
try {
// Validate and sanitize network data before writing to disk
@@ -321,7 +364,7 @@ export async function createSpecForIssue(
const taskContent = buildIssueContext(safeIssue, safeProject, config.instanceUrl);
await writeFile(path.join(specDir, 'TASK.md'), taskContent, 'utf-8');
- // Create metadata.json
+ // Create metadata.json (legacy format for GitLab-specific data)
const metadata = {
source: 'gitlab',
gitlab: {
@@ -339,6 +382,21 @@ export async function createSpecForIssue(
};
await writeFile(metadataPath, JSON.stringify(metadata, null, 2), 'utf-8');
+ // Create task_metadata.json (consistent with GitHub format for backend compatibility)
+ const taskMetadata = {
+ sourceType: 'gitlab' as const,
+ gitlabIssueIid: safeIssue.iid,
+ gitlabUrl: safeIssue.web_url,
+ category: determineCategoryFromLabels(safeIssue.labels || []),
+ // Store baseBranch for worktree creation and QA comparison
+ ...(baseBranch && { baseBranch })
+ };
+ await writeFile(
+ path.join(specDir, 'task_metadata.json'),
+ JSON.stringify(taskMetadata, null, 2),
+ 'utf-8'
+ );
+
debugLog('Created spec for issue:', { iid: safeIssue.iid, specDir });
// Return task info
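
A few illustrative calls, with hypothetical labels, showing how the heuristics in determineCategoryFromLabels() resolve, assuming labelMatchesWholeWord() behaves as its name and the comment above suggest:

```ts
// Hypothetical labels; results follow the checks in determineCategoryFromLabels().
determineCategoryFromLabels(['Bug', 'high-priority']); // 'bug_fix'
determineCategoryFromLabels(['CI', 'pipeline']);       // 'infrastructure' ('ci' matches as a whole word)
determineCategoryFromLabels(['acid-compliance']);      // 'feature' ('ci' inside a word does not match)
determineCategoryFromLabels(['docs']);                 // 'documentation'
determineCategoryFromLabels([]);                       // 'feature' (default)
```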
diff --git a/apps/frontend/src/main/ipc-handlers/index.ts b/apps/frontend/src/main/ipc-handlers/index.ts
index 3501abd8bc..b3ee57212b 100644
--- a/apps/frontend/src/main/ipc-handlers/index.ts
+++ b/apps/frontend/src/main/ipc-handlers/index.ts
@@ -23,7 +23,6 @@ import { registerEnvHandlers } from './env-handlers';
import { registerLinearHandlers } from './linear-handlers';
import { registerGithubHandlers } from './github-handlers';
import { registerGitlabHandlers } from './gitlab-handlers';
-import { registerAutobuildSourceHandlers } from './autobuild-source-handlers';
import { registerIdeationHandlers } from './ideation-handlers';
import { registerChangelogHandlers } from './changelog-handlers';
import { registerInsightsHandlers } from './insights-handlers';
@@ -32,6 +31,8 @@ import { registerAppUpdateHandlers } from './app-update-handlers';
import { registerDebugHandlers } from './debug-handlers';
import { registerClaudeCodeHandlers } from './claude-code-handlers';
import { registerMcpHandlers } from './mcp-handlers';
+import { registerProfileHandlers } from './profile-handlers';
+import { registerTerminalWorktreeIpcHandlers } from './terminal';
import { notificationService } from '../notification-service';
/**
@@ -60,6 +61,9 @@ export function setupIpcHandlers(
// Terminal and Claude profile handlers
registerTerminalHandlers(terminalManager, getMainWindow);
+ // Terminal worktree handlers (isolated development in worktrees)
+ registerTerminalWorktreeIpcHandlers();
+
// Agent event handlers (event forwarding from agent manager to renderer)
registerAgenteventsHandlers(agentManager, getMainWindow);
@@ -87,9 +91,6 @@ export function setupIpcHandlers(
// GitLab integration handlers
registerGitlabHandlers(agentManager, getMainWindow);
- // Auto-build source update handlers
- registerAutobuildSourceHandlers(getMainWindow);
-
// Ideation handlers
registerIdeationHandlers(agentManager, getMainWindow);
@@ -114,6 +115,9 @@ export function setupIpcHandlers(
// MCP server health check handlers
registerMcpHandlers();
+ // API Profile handlers (custom Anthropic-compatible endpoints)
+ registerProfileHandlers();
+
console.warn('[IPC] All handler modules registered successfully');
}
@@ -122,6 +126,7 @@ export {
registerProjectHandlers,
registerTaskHandlers,
registerTerminalHandlers,
+ registerTerminalWorktreeIpcHandlers,
registerAgenteventsHandlers,
registerSettingsHandlers,
registerFileHandlers,
@@ -131,7 +136,6 @@ export {
registerLinearHandlers,
registerGithubHandlers,
registerGitlabHandlers,
- registerAutobuildSourceHandlers,
registerIdeationHandlers,
registerChangelogHandlers,
registerInsightsHandlers,
@@ -139,5 +143,6 @@ export {
registerAppUpdateHandlers,
registerDebugHandlers,
registerClaudeCodeHandlers,
- registerMcpHandlers
+ registerMcpHandlers,
+ registerProfileHandlers
};
diff --git a/apps/frontend/src/main/ipc-handlers/insights-handlers.ts b/apps/frontend/src/main/ipc-handlers/insights-handlers.ts
index cef96a6d7d..11a18c0b88 100644
--- a/apps/frontend/src/main/ipc-handlers/insights-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/insights-handlers.ts
@@ -42,9 +42,27 @@ export function registerInsightsHandlers(
return;
}
- // Note: Python environment initialization should be handled by insightsService
- // or added here with proper dependency injection if needed
- insightsService.sendMessage(projectId, project.path, message, modelConfig);
+ // Await the async sendMessage to ensure proper error handling and
+ // that all async operations (like getProcessEnv) complete before
+ // the handler returns. This fixes race conditions on Windows where
+ // environment setup wouldn't complete before process spawn.
+ try {
+ await insightsService.sendMessage(projectId, project.path, message, modelConfig);
+ } catch (error) {
+ // Errors during sendMessage (executor errors) are already emitted via
+ // the 'error' event, but we catch here to prevent unhandled rejection
+ // and ensure all error types are reported to the UI
+ console.error('[Insights IPC] Error in sendMessage:', error);
+ const mainWindow = getMainWindow();
+ if (mainWindow) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ mainWindow.webContents.send(
+ IPC_CHANNELS.INSIGHTS_ERROR,
+ projectId,
+ `Failed to send message: ${errorMessage}`
+ );
+ }
+ }
}
);
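
The await matters because ipcMain.handle only reports rejections it can observe; a minimal sketch (the service shape is assumed) of the difference:

```ts
// Sketch: a fire-and-forget call lets a rejection escape the handler and become
// an unhandled rejection, while awaiting lets the handler report it to the UI.
async function fireAndForget(send: () => Promise<void>) {
  send(); // rejection not observed; the handler may return before async env setup finishes
}

async function awaited(send: () => Promise<void>) {
  try {
    await send(); // async setup completes before the handler returns
  } catch (error) {
    console.error(error); // forward to the renderer, as the handler above does
  }
}
```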
diff --git a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts
index 0515529973..50e16973e4 100644
--- a/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/mcp-handlers.ts
@@ -28,6 +28,12 @@ const DANGEROUS_FLAGS = new Set([
'--require', '-r'
]);
+/**
+ * Defense-in-depth: Shell metacharacters that could enable command injection
+ * when shell: true is used on Windows
+ */
+const SHELL_METACHARACTERS = ['&', '|', '>', '<', '^', '%', ';', '$', '`', '\n', '\r'];
+
/**
* Validate that a command is in the safe allowlist
*/
@@ -39,11 +45,22 @@ function isCommandSafe(command: string | undefined): boolean {
}
/**
- * Validate that args don't contain dangerous interpreter flags
+ * Validate that args don't contain dangerous interpreter flags or shell metacharacters
*/
function areArgsSafe(args: string[] | undefined): boolean {
if (!args || args.length === 0) return true;
- return !args.some(arg => DANGEROUS_FLAGS.has(arg));
+
+ // Check for dangerous interpreter flags
+ if (args.some(arg => DANGEROUS_FLAGS.has(arg))) return false;
+
+ // On Windows with shell: true, check for shell metacharacters that could enable injection
+ if (process.platform === 'win32') {
+ if (args.some(arg => SHELL_METACHARACTERS.some(char => arg.includes(char)))) {
+ return false;
+ }
+ }
+
+ return true;
}
/**
@@ -171,7 +188,7 @@ async function checkCommandHealth(server: CustomMcpServer, startTime: number): P
return resolve({
serverId: server.id,
status: 'unhealthy',
- message: 'Args contain dangerous interpreter flags',
+ message: 'Args contain dangerous flags or shell metacharacters',
checkedAt: new Date().toISOString(),
});
}
@@ -394,14 +411,17 @@ async function testCommandConnection(server: CustomMcpServer, startTime: number)
return resolve({
serverId: server.id,
success: false,
- message: 'Args contain dangerous interpreter flags',
+ message: 'Args contain dangerous flags or shell metacharacters',
});
}
const args = server.args || [];
+
+ // On Windows, use shell: true to properly handle .cmd/.bat scripts like npx
const proc = spawn(server.command!, args, {
stdio: ['pipe', 'pipe', 'pipe'],
timeout: 15000, // OS-level timeout for reliable process termination
+ shell: process.platform === 'win32', // Required for Windows to run npx.cmd
});
let stdout = '';
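
Illustrative outcomes of the tightened validation, assuming process.platform === 'win32' so the metacharacter check applies; the argument values are hypothetical:

```ts
// Hypothetical args; results follow areArgsSafe() above on Windows.
areArgsSafe(undefined);                                  // true  (nothing to validate)
areArgsSafe(['-y', '@modelcontextprotocol/server-fs']);  // true  (no dangerous flags or metacharacters)
areArgsSafe(['--require', './hook.js']);                 // false (dangerous interpreter flag)
areArgsSafe(['foo & del C:\\data']);                     // false ('&' could inject through shell: true)
```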
diff --git a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts
index 5b8c6d0504..9ea2b79ab4 100644
--- a/apps/frontend/src/main/ipc-handlers/memory-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/memory-handlers.ts
@@ -25,7 +25,7 @@ import {
} from '../memory-service';
import { validateOpenAIApiKey } from '../api-validation-service';
import { parsePythonCommand } from '../python-detector';
-import { getConfiguredPythonPath } from '../python-env-manager';
+import { getConfiguredPythonPath, pythonEnvManager } from '../python-env-manager';
import { openTerminalWithCommand } from './claude-code-handlers';
/**
@@ -212,7 +212,11 @@ function checkOllamaInstalled(): OllamaInstallStatus {
* - Official method per https://winstall.app/apps/Ollama.Ollama
* - Winget is pre-installed on Windows 10 (1709+) and Windows 11
*
- * macOS/Linux: Uses official install script from https://ollama.com/download
+ * macOS: Uses Homebrew (most common package manager on macOS)
+ * - Official method: brew install ollama
+ * - Reference: https://ollama.com/download/mac
+ *
+ * Linux: Uses official install script from https://ollama.com/download
*
* @returns {string} The install command to run in terminal
*/
@@ -222,8 +226,13 @@ function getOllamaInstallCommand(): string {
// This is an official installation method for Ollama on Windows
// Reference: https://winstall.app/apps/Ollama.Ollama
return 'winget install --id Ollama.Ollama --accept-source-agreements';
+ } else if (process.platform === 'darwin') {
+ // macOS: Use Homebrew (most widely used package manager on macOS)
+ // Official Ollama installation method for macOS
+ // Reference: https://ollama.com/download/mac
+ return 'brew install ollama';
} else {
- // macOS/Linux: Use shell script from official Ollama
+ // Linux: Use shell script from official Ollama
// Reference: https://ollama.com/download
return 'curl -fsSL https://ollama.com/install.sh | sh';
}
@@ -296,6 +305,9 @@ async function executeOllamaDetector(
let resolved = false;
const proc = spawn(pythonExe, args, {
stdio: ['ignore', 'pipe', 'pipe'],
+ // Use sanitized Python environment to prevent PYTHONHOME contamination
+ // Fixes "Could not find platform independent libraries" error on Windows
+ env: pythonEnvManager.getPythonEnv(),
});
let stdout = '';
@@ -769,6 +781,9 @@ export function registerMemoryHandlers(): void {
const proc = spawn(pythonExe, args, {
stdio: ['ignore', 'pipe', 'pipe'],
timeout: 600000, // 10 minute timeout for large models
+ // Use sanitized Python environment to prevent PYTHONHOME contamination
+ // Fixes "Could not find platform independent libraries" error on Windows
+ env: pythonEnvManager.getPythonEnv(),
});
let stdout = '';
diff --git a/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts b/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts
new file mode 100644
index 0000000000..0e115e4647
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/profile-handlers.test.ts
@@ -0,0 +1,341 @@
+/**
+ * Tests for profile IPC handlers
+ *
+ * Tests profiles:set-active handler with support for:
+ * - Setting valid profile as active
+ * - Switching to OAuth (null profileId)
+ */
+
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import type { APIProfile, ProfilesFile } from '@shared/types/profile';
+
+// Hoist mocked functions to avoid circular dependency in atomicModifyProfiles
+const { mockedLoadProfilesFile, mockedSaveProfilesFile } = vi.hoisted(() => ({
+ mockedLoadProfilesFile: vi.fn(),
+ mockedSaveProfilesFile: vi.fn()
+}));
+
+// Mock electron before importing
+vi.mock('electron', () => ({
+ ipcMain: {
+ handle: vi.fn(),
+ on: vi.fn()
+ }
+}));
+
+// Mock profile service
+vi.mock('../services/profile', () => ({
+ loadProfilesFile: mockedLoadProfilesFile,
+ saveProfilesFile: mockedSaveProfilesFile,
+ validateFilePermissions: vi.fn(),
+ getProfilesFilePath: vi.fn(() => '/test/profiles.json'),
+ createProfile: vi.fn(),
+ updateProfile: vi.fn(),
+ deleteProfile: vi.fn(),
+ testConnection: vi.fn(),
+ discoverModels: vi.fn(),
+ atomicModifyProfiles: vi.fn(async (modifier: (file: unknown) => unknown) => {
+ const file = await mockedLoadProfilesFile();
+ const modified = modifier(file);
+ await mockedSaveProfilesFile(modified as never);
+ return modified;
+ })
+}));
+
+import { registerProfileHandlers } from './profile-handlers';
+import { ipcMain } from 'electron';
+import { IPC_CHANNELS } from '../../shared/constants';
+import {
+ loadProfilesFile,
+ saveProfilesFile,
+ validateFilePermissions,
+ testConnection
+} from '../services/profile';
+import type { TestConnectionResult } from '@shared/types/profile';
+
+// Get the handler function for testing
+function getSetActiveHandler() {
+  const calls = (ipcMain.handle as unknown as ReturnType<typeof vi.fn>).mock.calls;
+ const setActiveCall = calls.find(
+ (call) => call[0] === IPC_CHANNELS.PROFILES_SET_ACTIVE
+ );
+ return setActiveCall?.[1];
+}
+
+// Get the testConnection handler function for testing
+function getTestConnectionHandler() {
+  const calls = (ipcMain.handle as unknown as ReturnType<typeof vi.fn>).mock.calls;
+ const testConnectionCall = calls.find(
+ (call) => call[0] === IPC_CHANNELS.PROFILES_TEST_CONNECTION
+ );
+ return testConnectionCall?.[1];
+}
+
+describe('profile-handlers - setActiveProfile', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ registerProfileHandlers();
+ });
+ const mockProfiles: APIProfile[] = [
+ {
+ id: 'profile-1',
+ name: 'Test Profile 1',
+ baseUrl: 'https://api.anthropic.com',
+ apiKey: 'sk-ant-test-key-1',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ },
+ {
+ id: 'profile-2',
+ name: 'Test Profile 2',
+ baseUrl: 'https://custom.api.com',
+ apiKey: 'sk-custom-key-2',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ];
+
+ describe('setting valid profile as active', () => {
+ it('should set active profile with valid profileId', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: mockProfiles,
+ activeProfileId: null,
+ version: 1
+ };
+
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+ vi.mocked(validateFilePermissions).mockResolvedValue(true);
+
+ const handler = getSetActiveHandler();
+ const result = await handler({}, 'profile-1');
+
+ expect(result).toEqual({ success: true });
+ expect(saveProfilesFile).toHaveBeenCalledWith(
+ expect.objectContaining({
+ activeProfileId: 'profile-1'
+ })
+ );
+ });
+
+ it('should return error for non-existent profile', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: mockProfiles,
+ activeProfileId: null,
+ version: 1
+ };
+
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const handler = getSetActiveHandler();
+ const result = await handler({}, 'non-existent-id');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Profile not found'
+ });
+ });
+ });
+
+ describe('switching to OAuth (null profileId)', () => {
+ it('should accept null profileId to switch to OAuth', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: mockProfiles,
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+ vi.mocked(validateFilePermissions).mockResolvedValue(true);
+
+ const handler = getSetActiveHandler();
+ const result = await handler({}, null);
+
+ // Should succeed and clear activeProfileId
+ expect(result).toEqual({ success: true });
+ expect(saveProfilesFile).toHaveBeenCalledWith(
+ expect.objectContaining({
+ activeProfileId: null
+ })
+ );
+ });
+
+ it('should handle null when no profile was active', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: mockProfiles,
+ activeProfileId: null,
+ version: 1
+ };
+
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+ vi.mocked(validateFilePermissions).mockResolvedValue(true);
+
+ const handler = getSetActiveHandler();
+ const result = await handler({}, null);
+
+ // Should succeed (idempotent operation)
+ expect(result).toEqual({ success: true });
+ expect(saveProfilesFile).toHaveBeenCalled();
+ });
+ });
+
+ describe('error handling', () => {
+ it('should handle loadProfilesFile errors', async () => {
+ vi.mocked(loadProfilesFile).mockRejectedValue(
+ new Error('Failed to load profiles')
+ );
+
+ const handler = getSetActiveHandler();
+ const result = await handler({}, 'profile-1');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Failed to load profiles'
+ });
+ });
+
+ it('should handle saveProfilesFile errors', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: mockProfiles,
+ activeProfileId: null,
+ version: 1
+ };
+
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockRejectedValue(
+ new Error('Failed to save')
+ );
+
+ const handler = getSetActiveHandler();
+ const result = await handler({}, 'profile-1');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Failed to save'
+ });
+ });
+ });
+});
+
+describe('profile-handlers - testConnection', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ registerProfileHandlers();
+ });
+
+ describe('successful connection tests', () => {
+ it('should return success result for valid connection', async () => {
+ const mockResult: TestConnectionResult = {
+ success: true,
+ message: 'Connection successful'
+ };
+
+ vi.mocked(testConnection).mockResolvedValue(mockResult);
+
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: true,
+ data: mockResult
+ });
+ expect(testConnection).toHaveBeenCalledWith(
+ 'https://api.anthropic.com',
+ 'sk-test-key-12chars',
+ expect.any(AbortSignal)
+ );
+ });
+ });
+
+ describe('input validation', () => {
+ it('should return error for empty baseUrl', async () => {
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, '', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Base URL is required'
+ });
+ expect(testConnection).not.toHaveBeenCalled();
+ });
+
+ it('should return error for whitespace-only baseUrl', async () => {
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, ' ', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Base URL is required'
+ });
+ expect(testConnection).not.toHaveBeenCalled();
+ });
+
+ it('should return error for empty apiKey', async () => {
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, 'https://api.anthropic.com', '');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'API key is required'
+ });
+ expect(testConnection).not.toHaveBeenCalled();
+ });
+
+ it('should return error for whitespace-only apiKey', async () => {
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, 'https://api.anthropic.com', ' ');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'API key is required'
+ });
+ expect(testConnection).not.toHaveBeenCalled();
+ });
+ });
+
+ describe('error handling', () => {
+ it('should return IPCResult with TestConnectionResult data for service errors', async () => {
+ const mockResult: TestConnectionResult = {
+ success: false,
+ errorType: 'auth',
+ message: 'Authentication failed. Please check your API key.'
+ };
+
+ vi.mocked(testConnection).mockResolvedValue(mockResult);
+
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, 'https://api.anthropic.com', 'invalid-key');
+
+ expect(result).toEqual({
+ success: true,
+ data: mockResult
+ });
+ });
+
+ it('should return error for unexpected exceptions', async () => {
+ vi.mocked(testConnection).mockRejectedValue(new Error('Unexpected error'));
+
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Unexpected error'
+ });
+ });
+
+ it('should return error for non-Error exceptions', async () => {
+ vi.mocked(testConnection).mockRejectedValue('String error');
+
+ const handler = getTestConnectionHandler();
+ const result = await handler({}, 'https://api.anthropic.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ error: 'Failed to test connection'
+ });
+ });
+ });
+});
diff --git a/apps/frontend/src/main/ipc-handlers/profile-handlers.ts b/apps/frontend/src/main/ipc-handlers/profile-handlers.ts
new file mode 100644
index 0000000000..565a1a711d
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/profile-handlers.ts
@@ -0,0 +1,401 @@
+/**
+ * Profile IPC Handlers
+ *
+ * IPC handlers for API profile management:
+ * - profiles:get - Get all profiles
+ * - profiles:save - Save/create a profile
+ * - profiles:update - Update an existing profile
+ * - profiles:delete - Delete a profile
+ * - profiles:setActive - Set active profile
+ * - profiles:test-connection - Test API profile connection
+ */
+
+import { ipcMain } from 'electron';
+import { IPC_CHANNELS } from '../../shared/constants';
+import type { IPCResult } from '../../shared/types';
+import type { APIProfile, ProfileFormData, ProfilesFile, TestConnectionResult, DiscoverModelsResult } from '@shared/types/profile';
+import {
+ loadProfilesFile,
+ saveProfilesFile,
+ validateFilePermissions,
+ getProfilesFilePath,
+ atomicModifyProfiles,
+ createProfile,
+ updateProfile,
+ deleteProfile,
+ testConnection,
+ testFoundryConnection,
+ discoverModels
+} from '../services/profile';
+import type { APIProfileType } from '@shared/types/profile';
+
+// Track active test connection requests for cancellation
+const activeTestConnections = new Map<number, AbortController>();
+
+// Track active discover models requests for cancellation
+const activeDiscoverModelsRequests = new Map<number, AbortController>();
+
+/**
+ * Register all profile-related IPC handlers
+ */
+export function registerProfileHandlers(): void {
+ /**
+ * Get all profiles
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_GET,
+    async (): Promise<IPCResult<ProfilesFile>> => {
+ try {
+ const profiles = await loadProfilesFile();
+ return { success: true, data: profiles };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to load profiles'
+ };
+ }
+ }
+ );
+
+ /**
+ * Save/create a profile
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_SAVE,
+ async (
+ _,
+ profileData: ProfileFormData
+    ): Promise<IPCResult<APIProfile>> => {
+ try {
+ // Use createProfile from service layer (handles validation)
+ const newProfile = await createProfile(profileData);
+
+ // Set file permissions to user-readable only
+ await validateFilePermissions(getProfilesFilePath()).catch((err) => {
+ console.warn('[profile-handlers] Failed to set secure file permissions:', err);
+ });
+
+ return { success: true, data: newProfile };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to save profile'
+ };
+ }
+ }
+ );
+
+ /**
+ * Update an existing profile
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_UPDATE,
+    async (_, profileData: APIProfile): Promise<IPCResult<APIProfile>> => {
+ try {
+ // Use updateProfile from service layer (handles validation)
+ const updatedProfile = await updateProfile({
+ id: profileData.id,
+ name: profileData.name,
+ type: profileData.type,
+ baseUrl: profileData.baseUrl,
+ apiKey: profileData.apiKey,
+ foundryResource: profileData.foundryResource,
+ models: profileData.models
+ });
+
+ // Set file permissions to user-readable only
+ await validateFilePermissions(getProfilesFilePath()).catch((err) => {
+ console.warn('[profile-handlers] Failed to set secure file permissions:', err);
+ });
+
+ return { success: true, data: updatedProfile };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to update profile'
+ };
+ }
+ }
+ );
+
+ /**
+ * Delete a profile
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_DELETE,
+    async (_, profileId: string): Promise<IPCResult> => {
+ try {
+ // Use deleteProfile from service layer (handles validation)
+ await deleteProfile(profileId);
+
+ return { success: true };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to delete profile'
+ };
+ }
+ }
+ );
+
+ /**
+ * Set active profile
+ * - If profileId is provided, set that profile as active
+ * - If profileId is null, clear active profile (switch to OAuth)
+ * Uses atomic operation to prevent race conditions
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_SET_ACTIVE,
+    async (_, profileId: string | null): Promise<IPCResult> => {
+ try {
+ await atomicModifyProfiles((file) => {
+ // If switching to OAuth (null), clear active profile
+ if (profileId === null) {
+ file.activeProfileId = null;
+ return file;
+ }
+
+ // Check if profile exists
+ const profileExists = file.profiles.some((p) => p.id === profileId);
+ if (!profileExists) {
+ throw new Error('Profile not found');
+ }
+
+ // Set active profile
+ file.activeProfileId = profileId;
+ return file;
+ });
+
+ return { success: true };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to set active profile'
+ };
+ }
+ }
+ );
+
+ /**
+ * Test API profile connection
+ * - Tests credentials by making a minimal API request
+ * - Returns detailed error information for different failure types
+ * - Includes configurable timeout (defaults to 15 seconds)
+ * - Supports cancellation via PROFILES_TEST_CONNECTION_CANCEL
+ * - Routes to appropriate test function based on profile type
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_TEST_CONNECTION,
+ async (
+ _event,
+ baseUrl: string,
+ apiKey: string,
+ requestId: number,
+ profileType?: APIProfileType,
+ foundryResource?: string
+    ): Promise<IPCResult<TestConnectionResult>> => {
+ // Create AbortController for timeout and cancellation
+ const controller = new AbortController();
+ const timeoutMs = 15000; // 15 seconds
+
+ // Track this request for cancellation
+ activeTestConnections.set(requestId, controller);
+
+ // Set timeout to abort the request
+ const timeoutId = setTimeout(() => {
+ controller.abort();
+ }, timeoutMs);
+
+ try {
+ // Determine the effective profile type (default to 'anthropic')
+ const effectiveType = profileType || 'anthropic';
+
+ // Route to appropriate test function based on profile type
+ if (effectiveType === 'foundry') {
+ // For Foundry, need either foundryResource OR baseUrl
+ const hasResource = foundryResource && foundryResource.trim() !== '';
+ const hasBaseUrl = baseUrl && baseUrl.trim() !== '';
+
+ if (!hasResource && !hasBaseUrl) {
+ clearTimeout(timeoutId);
+ activeTestConnections.delete(requestId);
+ return {
+ success: false,
+ error: 'Either Azure Resource Name or Endpoint URL is required'
+ };
+ }
+
+ // API key is optional for Foundry (Entra ID auth supported)
+ const result = await testFoundryConnection(
+ baseUrl || '',
+ apiKey || '',
+ foundryResource || '',
+ controller.signal
+ );
+
+ clearTimeout(timeoutId);
+ activeTestConnections.delete(requestId);
+ return { success: true, data: result };
+ }
+
+ // Anthropic profile - validate inputs (null/empty checks)
+ if (!baseUrl || baseUrl.trim() === '') {
+ clearTimeout(timeoutId);
+ activeTestConnections.delete(requestId);
+ return {
+ success: false,
+ error: 'Base URL is required'
+ };
+ }
+
+ if (!apiKey || apiKey.trim() === '') {
+ clearTimeout(timeoutId);
+ activeTestConnections.delete(requestId);
+ return {
+ success: false,
+ error: 'API key is required'
+ };
+ }
+
+ // Call testConnection from service layer with abort signal
+ const result = await testConnection(baseUrl, apiKey, controller.signal);
+
+ // Clear timeout on success
+ clearTimeout(timeoutId);
+ activeTestConnections.delete(requestId);
+
+ return { success: true, data: result };
+ } catch (error) {
+ // Clear timeout on error
+ clearTimeout(timeoutId);
+ activeTestConnections.delete(requestId);
+
+ // Handle abort errors (timeout or explicit cancellation)
+ if (error instanceof Error && error.name === 'AbortError') {
+ return {
+ success: false,
+ error: 'Connection timeout. The request took too long to complete.'
+ };
+ }
+
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to test connection'
+ };
+ }
+ }
+ );
+
+ /**
+ * Cancel an active test connection request
+ */
+ ipcMain.on(
+ IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL,
+ (_event, requestId: number) => {
+ const controller = activeTestConnections.get(requestId);
+ if (controller) {
+ controller.abort();
+ activeTestConnections.delete(requestId);
+ }
+ }
+ );
+
+ /**
+ * Discover available models from API endpoint
+ * - Fetches list of models from /v1/models endpoint
+ * - Returns model IDs and display names for dropdown selection
+ * - Supports cancellation via PROFILES_DISCOVER_MODELS_CANCEL
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.PROFILES_DISCOVER_MODELS,
+    async (_event, baseUrl: string, apiKey: string, requestId: number): Promise<IPCResult<DiscoverModelsResult>> => {
+ console.log('[discoverModels] Called with:', { baseUrl, requestId });
+
+ // Create AbortController for timeout and cancellation
+ const controller = new AbortController();
+ const timeoutMs = 15000; // 15 seconds
+
+ // Track this request for cancellation
+ activeDiscoverModelsRequests.set(requestId, controller);
+
+ // Set timeout to abort the request
+ const timeoutId = setTimeout(() => {
+ controller.abort();
+ }, timeoutMs);
+
+ try {
+ // Validate inputs (null/empty checks)
+ if (!baseUrl || baseUrl.trim() === '') {
+ clearTimeout(timeoutId);
+ activeDiscoverModelsRequests.delete(requestId);
+ return {
+ success: false,
+ error: 'Base URL is required'
+ };
+ }
+
+ if (!apiKey || apiKey.trim() === '') {
+ clearTimeout(timeoutId);
+ activeDiscoverModelsRequests.delete(requestId);
+ return {
+ success: false,
+ error: 'API key is required'
+ };
+ }
+
+ // Call discoverModels from service layer with abort signal
+ const result = await discoverModels(baseUrl, apiKey, controller.signal);
+
+ // Clear timeout on success
+ clearTimeout(timeoutId);
+ activeDiscoverModelsRequests.delete(requestId);
+
+ return { success: true, data: result };
+ } catch (error) {
+ // Clear timeout on error
+ clearTimeout(timeoutId);
+ activeDiscoverModelsRequests.delete(requestId);
+
+ // Handle abort errors (timeout or explicit cancellation)
+ if (error instanceof Error && error.name === 'AbortError') {
+ return {
+ success: false,
+ error: 'Connection timeout. The request took too long to complete.'
+ };
+ }
+
+ // Extract error type if available
+ const errorType = (error as any).errorType;
+ const errorMessage = error instanceof Error ? error.message : 'Failed to discover models';
+
+ // Log for debugging
+ console.error('[discoverModels] Error:', {
+ name: error instanceof Error ? error.name : 'unknown',
+ message: errorMessage,
+ errorType,
+ originalError: error
+ });
+
+ // Include error type in error message for UI to handle appropriately
+ return {
+ success: false,
+ error: errorMessage
+ };
+ }
+ }
+ );
+
+ /**
+ * Cancel an active discover models request
+ */
+ ipcMain.on(
+ IPC_CHANNELS.PROFILES_DISCOVER_MODELS_CANCEL,
+ (_event, requestId: number) => {
+ const controller = activeDiscoverModelsRequests.get(requestId);
+ if (controller) {
+ controller.abort();
+ activeDiscoverModelsRequests.delete(requestId);
+ }
+ }
+ );
+}
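
A hedged sketch of the renderer-side flow for the requestId-based cancellation above. It assumes the renderer can reach ipcRenderer (for example through a preload bridge) and that IPC_CHANNELS resolves from the shared constants; only the request/cancel shape is taken from the handler:

```ts
import { ipcRenderer } from 'electron';
import { IPC_CHANNELS } from '../../shared/constants'; // path assumed

function testProfileConnection(baseUrl: string, apiKey: string) {
  const requestId = Date.now(); // any number unique per in-flight test

  const result = ipcRenderer.invoke(
    IPC_CHANNELS.PROFILES_TEST_CONNECTION,
    baseUrl,
    apiKey,
    requestId
  );

  // Call this if the user dismisses the dialog before the 15 s timeout fires;
  // the handler aborts the request and resolves with { success: false, error: ... }.
  const cancel = () => ipcRenderer.send(IPC_CHANNELS.PROFILES_TEST_CONNECTION_CANCEL, requestId);

  return { result, cancel };
}
```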
diff --git a/apps/frontend/src/main/ipc-handlers/project-handlers.ts b/apps/frontend/src/main/ipc-handlers/project-handlers.ts
index 4ca0eb726b..d752be8d7f 100644
--- a/apps/frontend/src/main/ipc-handlers/project-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/project-handlers.ts
@@ -34,16 +34,56 @@ import { getEffectiveSourcePath } from '../updater/path-resolver';
// ============================================
/**
- * Get list of git branches for a directory
+ * Get list of git branches for a directory (both local and remote)
*/
function getGitBranches(projectPath: string): string[] {
try {
- const result = execFileSync(getToolPath('git'), ['branch', '--list', '--format=%(refname:short)'], {
+ // First fetch to ensure we have latest remote refs
+ try {
+ execFileSync(getToolPath('git'), ['fetch', '--prune'], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ timeout: 10000 // 10 second timeout for fetch
+ });
+ } catch {
+ // Fetch may fail if offline or no remote, continue with local refs
+ }
+
+ // Get all branches (local + remote) using --all flag
+ const result = execFileSync(getToolPath('git'), ['branch', '--all', '--format=%(refname:short)'], {
cwd: projectPath,
encoding: 'utf-8',
stdio: ['pipe', 'pipe', 'pipe']
});
- return result.trim().split('\n').filter(b => b.trim());
+
+ const branches = result.trim().split('\n')
+ .filter(b => b.trim())
+ .map(b => {
+        // Remote branches come as "origin/branch-name"; keep the full name so the
+        // ref stays usable (duplicates of local branches are removed below)
+ return b.trim();
+ })
+ // Remove HEAD pointer entries like "origin/HEAD"
+ .filter(b => !b.endsWith('/HEAD'))
+ // Remove duplicates (local branch may exist alongside remote)
+ .filter((branch, index, self) => {
+ // If it's a remote branch (origin/x) and local version exists, keep local
+ if (branch.startsWith('origin/')) {
+ const localName = branch.replace('origin/', '');
+ return !self.includes(localName);
+ }
+ return self.indexOf(branch) === index;
+ });
+
+ // Sort: local branches first, then remote branches
+ return branches.sort((a, b) => {
+ const aIsRemote = a.startsWith('origin/');
+ const bIsRemote = b.startsWith('origin/');
+ if (aIsRemote && !bIsRemote) return 1;
+ if (!aIsRemote && bIsRemote) return -1;
+ return a.localeCompare(b);
+ });
} catch {
return [];
}
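
A worked example of the filtering and ordering above, using a hypothetical set of refs:

```ts
// Hypothetical output of `git branch --all --format=%(refname:short)`:
const raw = ['main', 'feature/login', 'origin/HEAD', 'origin/main', 'origin/release/1.2'];
// 'origin/HEAD' is dropped (HEAD pointer entry);
// 'origin/main' is dropped because the local 'main' already exists;
// local branches sort before remote ones, each group alphabetically.
const expected = ['feature/login', 'main', 'origin/release/1.2'];
```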
diff --git a/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts b/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts
index 0eb8b3aa13..62f9faee98 100644
--- a/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts
+++ b/apps/frontend/src/main/ipc-handlers/roadmap/transformers.ts
@@ -96,6 +96,57 @@ function transformPhase(raw: RawRoadmapPhase): RoadmapPhase {
};
}
+/**
+ * Maps all known backend status values to canonical Kanban column statuses.
+ * Includes valid statuses as identity mappings for consistent lookup.
+ * Module-level constant for efficiency (not recreated on each call).
+ */
+const STATUS_MAP: Record<string, RoadmapFeature['status']> = {
+ // Canonical Kanban statuses (identity mappings)
+ 'under_review': 'under_review',
+ 'planned': 'planned',
+ 'in_progress': 'in_progress',
+ 'done': 'done',
+ // Early-stage / ideation statuses → under_review
+ 'idea': 'under_review',
+ 'backlog': 'under_review',
+ 'proposed': 'under_review',
+ 'pending': 'under_review',
+ // Approved / scheduled statuses → planned
+ 'approved': 'planned',
+ 'scheduled': 'planned',
+ // Active development statuses → in_progress
+ 'active': 'in_progress',
+ 'building': 'in_progress',
+ // Completed statuses → done
+ 'complete': 'done',
+ 'completed': 'done',
+ 'shipped': 'done'
+};
+
+/**
+ * Normalizes a feature status string to a valid Kanban column status.
+ * Handles case-insensitive matching and maps backend values to canonical statuses.
+ *
+ * @param status - The raw status string from the backend
+ * @returns A valid RoadmapFeature status for Kanban display
+ */
+function normalizeFeatureStatus(status: string | undefined): RoadmapFeature['status'] {
+ if (!status) return 'under_review';
+
+ const normalized = STATUS_MAP[status.toLowerCase()];
+
+ if (!normalized) {
+ // Debug log for unmapped statuses to aid future mapping additions
+ if (process.env.NODE_ENV === 'development') {
+ console.debug(`[Roadmap] normalizeFeatureStatus: unmapped status "${status}", defaulting to "under_review"`);
+ }
+ return 'under_review';
+ }
+
+ return normalized;
+}
+
function transformFeature(raw: RawRoadmapFeature): RoadmapFeature {
return {
id: raw.id,
@@ -107,7 +158,7 @@ function transformFeature(raw: RawRoadmapFeature): RoadmapFeature {
impact: (raw.impact as RoadmapFeature['impact']) || 'medium',
phaseId: raw.phase_id || raw.phaseId || '',
dependencies: raw.dependencies || [],
- status: (raw.status as RoadmapFeature['status']) || 'under_review',
+ status: normalizeFeatureStatus(raw.status),
acceptanceCriteria: raw.acceptance_criteria || raw.acceptanceCriteria || [],
userStories: raw.user_stories || raw.userStories || [],
linkedSpecId: raw.linked_spec_id || raw.linkedSpecId,
@@ -115,6 +166,7 @@ function transformFeature(raw: RawRoadmapFeature): RoadmapFeature {
};
}
+
export function transformRoadmapFromSnakeCase(
raw: RawRoadmap,
projectId: string,
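
Illustrative inputs and the Kanban statuses normalizeFeatureStatus() yields for them:

```ts
normalizeFeatureStatus('Shipped');     // 'done'          (case-insensitive lookup)
normalizeFeatureStatus('backlog');     // 'under_review'
normalizeFeatureStatus('in_progress'); // 'in_progress'   (identity mapping)
normalizeFeatureStatus('on_hold');     // 'under_review'  (unmapped, default; logged in development)
normalizeFeatureStatus(undefined);     // 'under_review'
```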
diff --git a/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt b/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt
index 5432d01173..ff5bb4bd42 100644
--- a/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt
+++ b/apps/frontend/src/main/ipc-handlers/sections/integration-section.txt
@@ -304,9 +304,10 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT
try {
// Check if Claude CLI is available and authenticated
const result = await new Promise((resolve) => {
- const proc = spawn('claude', ['--version'], {
+ const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation();
+ const proc = spawn(claudeCmd, ['--version'], {
cwd: project.path,
- env: { ...process.env },
+ env: claudeEnv,
shell: true
});
@@ -325,9 +326,9 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT
if (code === 0) {
// Claude CLI is available, check if authenticated
// Run a simple command that requires auth
- const authCheck = spawn('claude', ['api', '--help'], {
+ const authCheck = spawn(claudeCmd, ['api', '--help'], {
cwd: project.path,
- env: { ...process.env },
+ env: claudeEnv,
shell: true
});
@@ -384,9 +385,10 @@ ${existingVars['GRAPHITI_DATABASE'] ? `GRAPHITI_DATABASE=${existingVars['GRAPHIT
try {
// Run claude setup-token which will open browser for OAuth
const result = await new Promise((resolve) => {
- const proc = spawn('claude', ['setup-token'], {
+ const { command: claudeCmd, env: claudeEnv } = getClaudeCliInvocation();
+ const proc = spawn(claudeCmd, ['setup-token'], {
cwd: project.path,
- env: { ...process.env },
+ env: claudeEnv,
shell: true,
stdio: 'inherit' // This allows the terminal to handle the interactive auth
});
diff --git a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts
index d6e7b94ff4..9aecfca97d 100644
--- a/apps/frontend/src/main/ipc-handlers/settings-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/settings-handlers.ts
@@ -1,19 +1,21 @@
import { ipcMain, dialog, app, shell } from 'electron';
-import { existsSync, writeFileSync, mkdirSync, statSync } from 'fs';
+import { existsSync, writeFileSync, mkdirSync, statSync, readFileSync } from 'fs';
import { execFileSync } from 'node:child_process';
import path from 'path';
import { is } from '@electron-toolkit/utils';
import { IPC_CHANNELS, DEFAULT_APP_SETTINGS, DEFAULT_AGENT_PROFILES } from '../../shared/constants';
import type {
AppSettings,
- IPCResult
+ IPCResult,
+ SourceEnvConfig,
+ SourceEnvCheckResult
} from '../../shared/types';
import { AgentManager } from '../agent';
import type { BrowserWindow } from 'electron';
-import { getEffectiveVersion } from '../auto-claude-updater';
-import { setUpdateChannel } from '../app-updater';
+import { setUpdateChannel, setUpdateChannelWithDowngradeCheck } from '../app-updater';
import { getSettingsPath, readSettingsFile } from '../settings-utils';
-import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform } from '../cli-tool-manager';
+import { configureTools, getToolPath, getToolInfo, isPathFromWrongPlatform, preWarmToolCache } from '../cli-tool-manager';
+import { parseEnvFile } from './utils';
const settingsPath = getSettingsPath();
@@ -34,13 +36,16 @@ const detectAutoBuildSourcePath = (): string | null => {
);
} else {
// Production mode paths (packaged app)
- // On Windows/Linux/macOS, the app might be installed anywhere
- // We check common locations relative to the app bundle
+ // The backend is bundled as extraResources/backend
+ // On all platforms, it should be at process.resourcesPath/backend
+ possiblePaths.push(
+ path.resolve(process.resourcesPath, 'backend') // Primary: extraResources/backend
+ );
+ // Fallback paths for different app structures
const appPath = app.getAppPath();
possiblePaths.push(
- path.resolve(appPath, '..', 'backend'), // Sibling to app
- path.resolve(appPath, '..', '..', 'backend'), // Up 2 from app
- path.resolve(process.resourcesPath, '..', 'backend') // Relative to resources
+ path.resolve(appPath, '..', 'backend'), // Sibling to asar
+ path.resolve(appPath, '..', '..', 'Resources', 'backend') // macOS bundle structure
);
}
@@ -166,6 +171,11 @@ export function registerSettingsHandlers(
claudePath: settings.claudePath,
});
+ // Re-warm cache asynchronously after configuring (non-blocking)
+ preWarmToolCache(['claude']).catch((error) => {
+ console.warn('[SETTINGS_GET] Failed to re-warm CLI cache:', error);
+ });
+
return { success: true, data: settings as AppSettings };
}
);
@@ -207,12 +217,25 @@ export function registerSettingsHandlers(
githubCLIPath: newSettings.githubCLIPath,
claudePath: newSettings.claudePath,
});
+
+ // Re-warm cache asynchronously after configuring (non-blocking)
+ preWarmToolCache(['claude']).catch((error) => {
+ console.warn('[SETTINGS_SAVE] Failed to re-warm CLI cache:', error);
+ });
}
// Update auto-updater channel if betaUpdates setting changed
if (settings.betaUpdates !== undefined) {
- const channel = settings.betaUpdates ? 'beta' : 'latest';
- setUpdateChannel(channel);
+ if (settings.betaUpdates) {
+ // Enabling beta updates - just switch channel
+ setUpdateChannel('beta');
+ } else {
+ // Disabling beta updates - switch to stable and check if downgrade is available
+ // This will notify the renderer if user is on a prerelease and stable version exists
+ setUpdateChannelWithDowngradeCheck('latest', true).catch((err) => {
+ console.error('[settings-handlers] Failed to check for stable downgrade:', err);
+ });
+ }
}
return { success: true };
@@ -372,8 +395,8 @@ export function registerSettingsHandlers(
// ============================================
ipcMain.handle(IPC_CHANNELS.APP_VERSION, async (): Promise<string> => {
- // Use effective version which accounts for source updates
- const version = getEffectiveVersion();
+ // Return the actual bundled version from package.json
+ const version = app.getVersion();
console.log('[settings-handlers] APP_VERSION returning:', version);
return version;
});
@@ -499,4 +522,238 @@ export function registerSettingsHandlers(
}
}
);
+
+ // ============================================
+ // Auto-Build Source Environment Operations
+ // ============================================
+
+ /**
+ * Helper to get source .env path from settings
+ *
+ * In production mode, the .env file is NOT bundled (excluded in electron-builder config).
+ * We store the source .env in app userData directory instead, which is writable.
+ * The sourcePath points to the bundled backend for reference, but envPath is in userData.
+ */
+ const getSourceEnvPath = (): {
+ sourcePath: string | null;
+ envPath: string | null;
+ isProduction: boolean;
+ } => {
+ const savedSettings = readSettingsFile();
+ const settings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+
+ // Get autoBuildPath from settings or try to auto-detect
+ let sourcePath: string | null = settings.autoBuildPath || null;
+ if (!sourcePath) {
+ sourcePath = detectAutoBuildSourcePath();
+ }
+
+ if (!sourcePath) {
+ return { sourcePath: null, envPath: null, isProduction: !is.dev };
+ }
+
+ // In production, use userData directory for .env since resources may be read-only
+ // In development, use the actual source path
+ let envPath: string;
+ if (is.dev) {
+ envPath = path.join(sourcePath, '.env');
+ } else {
+ // Production: store .env in userData/backend/.env
+ const userDataBackendDir = path.join(app.getPath('userData'), 'backend');
+ if (!existsSync(userDataBackendDir)) {
+ mkdirSync(userDataBackendDir, { recursive: true });
+ }
+ envPath = path.join(userDataBackendDir, '.env');
+ }
+
+ return {
+ sourcePath,
+ envPath,
+ isProduction: !is.dev
+ };
+ };
+
+ ipcMain.handle(
+ IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_GET,
+ async (): Promise<IPCResult<SourceEnvConfig>> => {
+ try {
+ const { sourcePath, envPath } = getSourceEnvPath();
+
+ // Load global settings to check for global token fallback
+ const savedSettings = readSettingsFile();
+ const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+
+ if (!sourcePath) {
+ // Even without source path, check global token
+ const globalToken = globalSettings.globalClaudeOAuthToken;
+ return {
+ success: true,
+ data: {
+ hasClaudeToken: !!globalToken && globalToken.length > 0,
+ claudeOAuthToken: globalToken,
+ envExists: false
+ }
+ };
+ }
+
+ const envExists = envPath ? existsSync(envPath) : false;
+ let hasClaudeToken = false;
+ let claudeOAuthToken: string | undefined;
+
+ // First, check source .env file
+ if (envExists && envPath) {
+ const content = readFileSync(envPath, 'utf-8');
+ const vars = parseEnvFile(content);
+ claudeOAuthToken = vars['CLAUDE_CODE_OAUTH_TOKEN'];
+ hasClaudeToken = !!claudeOAuthToken && claudeOAuthToken.length > 0;
+ }
+
+ // Fallback to global settings if no token in source .env
+ if (!hasClaudeToken && globalSettings.globalClaudeOAuthToken) {
+ claudeOAuthToken = globalSettings.globalClaudeOAuthToken;
+ hasClaudeToken = true;
+ }
+
+ return {
+ success: true,
+ data: {
+ hasClaudeToken,
+ claudeOAuthToken,
+ sourcePath,
+ envExists
+ }
+ };
+ } catch (error) {
+ // Log the error for debugging in production
+ console.error('[AUTOBUILD_SOURCE_ENV_GET] Error:', error);
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to get source env'
+ };
+ }
+ }
+ );
+
+ ipcMain.handle(
+ IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_UPDATE,
+ async (_, config: { claudeOAuthToken?: string }): Promise<IPCResult> => {
+ try {
+ const { sourcePath, envPath } = getSourceEnvPath();
+
+ if (!sourcePath || !envPath) {
+ return {
+ success: false,
+ error: 'Auto-build source path not configured. Please set it in Settings.'
+ };
+ }
+
+ // Read existing content or start fresh (avoiding TOCTOU race condition)
+ let existingVars: Record<string, string> = {};
+ try {
+ const content = readFileSync(envPath, 'utf-8');
+ existingVars = parseEnvFile(content);
+ } catch (_readError) {
+ // File doesn't exist or can't be read - start with empty vars
+ // This is expected for first-time setup
+ }
+
+ // Update with new values
+ if (config.claudeOAuthToken !== undefined) {
+ existingVars['CLAUDE_CODE_OAUTH_TOKEN'] = config.claudeOAuthToken;
+ }
+
+ // Generate content
+ const lines: string[] = [
+ '# Auto Claude Framework Environment Variables',
+ '# Managed by Auto Claude UI',
+ '',
+ '# Claude Code OAuth Token (REQUIRED)',
+ `CLAUDE_CODE_OAUTH_TOKEN=${existingVars['CLAUDE_CODE_OAUTH_TOKEN'] || ''}`,
+ ''
+ ];
+
+ // Preserve other existing variables
+ for (const [key, value] of Object.entries(existingVars)) {
+ if (key !== 'CLAUDE_CODE_OAUTH_TOKEN') {
+ lines.push(`${key}=${value}`);
+ }
+ }
+
+ writeFileSync(envPath, lines.join('\n'));
+
+ return { success: true };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to update source env'
+ };
+ }
+ }
+ );
+
+ ipcMain.handle(
+ IPC_CHANNELS.AUTOBUILD_SOURCE_ENV_CHECK_TOKEN,
+ async (): Promise<IPCResult<SourceEnvCheckResult>> => {
+ try {
+ const { sourcePath, envPath, isProduction } = getSourceEnvPath();
+
+ // Load global settings to check for global token fallback
+ const savedSettings = readSettingsFile();
+ const globalSettings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+
+ // Check global token first as it's the primary method
+ const globalToken = globalSettings.globalClaudeOAuthToken;
+ const hasGlobalToken = !!globalToken && globalToken.length > 0;
+
+ if (!sourcePath) {
+ // In production, no source path is acceptable if global token exists
+ if (hasGlobalToken) {
+ return {
+ success: true,
+ data: {
+ hasToken: true,
+ sourcePath: isProduction ? app.getPath('userData') : undefined
+ }
+ };
+ }
+ return {
+ success: true,
+ data: {
+ hasToken: false,
+ error: isProduction
+ ? 'Please configure Claude OAuth token in Settings > API Configuration'
+ : 'Auto-build source path not configured'
+ }
+ };
+ }
+
+ // Check source .env file
+ let hasEnvToken = false;
+ if (envPath && existsSync(envPath)) {
+ const content = readFileSync(envPath, 'utf-8');
+ const vars = parseEnvFile(content);
+ const token = vars['CLAUDE_CODE_OAUTH_TOKEN'];
+ hasEnvToken = !!token && token.length > 0;
+ }
+
+ // Token exists if either source .env has it OR global settings has it
+ const hasToken = hasEnvToken || hasGlobalToken;
+
+ return {
+ success: true,
+ data: {
+ hasToken,
+ sourcePath
+ }
+ };
+ } catch (error) {
+ // Log the error for debugging in production
+ console.error('[AUTOBUILD_SOURCE_ENV_CHECK_TOKEN] Error:', error);
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to check source token'
+ };
+ }
+ }
+ );
}
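
The handlers above rely on parseEnvFile (imported from './utils') returning the .env contents as a key/value record. A minimal sketch of that shape, assuming a plain KEY=VALUE format with '#' comments (the real parser may handle more cases):

// Hedged sketch: the real parseEnvFile lives in './utils'; this assumes only what
// the handlers above rely on (a flat Record of KEY=VALUE pairs).
function parseEnvFileSketch(content: string): Record<string, string> {
  const vars: Record<string, string> = {};
  for (const rawLine of content.split(/\r?\n/)) {
    const line = rawLine.trim();
    if (!line || line.startsWith('#')) continue; // skip blanks and comments
    const eq = line.indexOf('=');
    if (eq <= 0) continue;                       // ignore malformed lines
    vars[line.slice(0, eq).trim()] = line.slice(eq + 1).trim();
  }
  return vars;
}

// Usage mirroring the token lookup in AUTOBUILD_SOURCE_ENV_GET:
// const vars = parseEnvFileSketch(readFileSync(envPath, 'utf-8'));
// const hasClaudeToken = !!vars['CLAUDE_CODE_OAUTH_TOKEN'];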
diff --git a/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts b/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts
new file mode 100644
index 0000000000..d51ee6fbdd
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/shared/label-utils.ts
@@ -0,0 +1,34 @@
+/**
+ * Shared label matching utilities
+ * Used by both GitHub and GitLab spec-utils for category detection
+ */
+
+/**
+ * Escape special regex characters in a string.
+ * This ensures that terms like "c++" or "c#" are matched literally.
+ *
+ * @param str - The string to escape
+ * @returns The escaped string safe for use in a RegExp
+ */
+function escapeRegExp(str: string): string {
+ return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+}
+
+/**
+ * Check if a label contains a whole-word match for a term.
+ * Uses word boundaries to prevent false positives (e.g., the term 'ci' matching inside 'acid').
+ *
+ * The term is escaped to handle regex metacharacters safely, so terms like
+ * "c++" or "c#" are matched literally rather than being interpreted as regex.
+ *
+ * @param label - The label to check (already lowercased)
+ * @param term - The term to search for (will be escaped for regex safety)
+ * @returns true if the label contains the term as a whole word
+ */
+export function labelMatchesWholeWord(label: string, term: string): boolean {
+ // Escape regex metacharacters in the term to match literally
+ const escapedTerm = escapeRegExp(term);
+ // Use word boundary regex to match whole words only
+ const regex = new RegExp(`\\b${escapedTerm}\\b`);
+ return regex.test(label);
+}
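
A quick usage sketch of labelMatchesWholeWord (labels are illustrative):

// Whole-word matching avoids substring false positives:
labelMatchesWholeWord('needs ci fix', 'ci');   // true  - 'ci' appears as a whole word
labelMatchesWholeWord('acid handling', 'ci');  // false - 'ci' only appears inside 'acid'
// Escaping keeps terms with regex metacharacters from throwing at RegExp construction:
labelMatchesWholeWord('c++ parser', 'c++');    // no "nothing to repeat" syntax error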
diff --git a/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts
index 232f54bedf..50049f06e8 100644
--- a/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/crud-handlers.ts
@@ -194,6 +194,9 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void {
updatedAt: new Date()
};
+ // Invalidate cache since a new task was created
+ projectStore.invalidateTasksCache(projectId);
+
return { success: true, data: task };
}
);
@@ -230,6 +233,10 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void {
} else {
console.warn(`[TASK_DELETE] Spec directory not found: ${specDir}`);
}
+
+ // Invalidate cache since a task was deleted
+ projectStore.invalidateTasksCache(project.id);
+
return { success: true };
} catch (error) {
console.error('[TASK_DELETE] Error deleting spec directory:', error);
@@ -418,6 +425,9 @@ export function registerTaskCRUDHandlers(agentManager: AgentManager): void {
updatedAt: new Date()
};
+ // Invalidate cache since a task was updated
+ projectStore.invalidateTasksCache(project.id);
+
return { success: true, data: updatedTask };
} catch (error) {
return {
diff --git a/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts
index 1e0ce9ba52..1626190f76 100644
--- a/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/execution-handlers.ts
@@ -2,7 +2,7 @@ import { ipcMain, BrowserWindow } from 'electron';
import { IPC_CHANNELS, AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants';
import type { IPCResult, TaskStartOptions, TaskStatus } from '../../../shared/types';
import path from 'path';
-import { existsSync, readFileSync, writeFileSync, mkdirSync, renameSync, unlinkSync } from 'fs';
+import { existsSync, readFileSync, writeFileSync, renameSync, unlinkSync } from 'fs';
import { spawnSync } from 'child_process';
import { AgentManager } from '../../agent';
import { fileWatcher } from '../../file-watcher';
@@ -12,9 +12,10 @@ import { getClaudeProfileManager } from '../../claude-profile-manager';
import {
getPlanPath,
persistPlanStatus,
- persistPlanStatusSync,
createPlanIfNotExists
} from './plan-file-utils';
+import { findTaskWorktree } from '../../worktree-paths';
+import { projectStore } from '../../project-store';
/**
* Atomic file write to prevent TOCTOU race conditions.
@@ -192,7 +193,8 @@ export function registerTaskExecutionHandlers(
{
parallel: false, // Sequential for planning phase
workers: 1,
- baseBranch
+ baseBranch,
+ useWorktree: task.metadata?.useWorktree
}
);
} else {
@@ -207,7 +209,8 @@ export function registerTaskExecutionHandlers(
{
parallel: false,
workers: 1,
- baseBranch
+ baseBranch,
+ useWorktree: task.metadata?.useWorktree
}
);
}
@@ -236,7 +239,7 @@ export function registerTaskExecutionHandlers(
setImmediate(async () => {
const persistStart = Date.now();
try {
- const persisted = await persistPlanStatus(planPath, 'in_progress');
+ const persisted = await persistPlanStatus(planPath, 'in_progress', project.id);
if (persisted) {
console.warn('[TASK_START] Updated plan status to: in_progress');
}
@@ -288,7 +291,7 @@ export function registerTaskExecutionHandlers(
setImmediate(async () => {
const persistStart = Date.now();
try {
- const persisted = await persistPlanStatus(planPath, 'backlog');
+ const persisted = await persistPlanStatus(planPath, 'backlog', project.id);
if (persisted) {
console.warn('[TASK_STOP] Updated plan status to backlog');
}
@@ -332,9 +335,9 @@ export function registerTaskExecutionHandlers(
);
// Check if worktree exists - QA needs to run in the worktree where the build happened
- const worktreePath = path.join(project.path, '.worktrees', task.specId);
- const worktreeSpecDir = path.join(worktreePath, specsBaseDir, task.specId);
- const hasWorktree = existsSync(worktreePath);
+ const worktreePath = findTaskWorktree(project.path, task.specId);
+ const worktreeSpecDir = worktreePath ? path.join(worktreePath, specsBaseDir, task.specId) : null;
+ const hasWorktree = worktreePath !== null;
if (approved) {
// Write approval to QA report
@@ -382,14 +385,14 @@ export function registerTaskExecutionHandlers(
}
// Step 3: Clean untracked files that came from the merge
- // IMPORTANT: Exclude .auto-claude and .worktrees directories to preserve specs and worktree data
- const cleanResult = spawnSync('git', ['clean', '-fd', '-e', '.auto-claude', '-e', '.worktrees'], {
+ // IMPORTANT: Exclude .auto-claude directory to preserve specs and worktree data
+ const cleanResult = spawnSync('git', ['clean', '-fd', '-e', '.auto-claude'], {
cwd: project.path,
encoding: 'utf-8',
stdio: 'pipe'
});
if (cleanResult.status === 0) {
- console.log('[TASK_REVIEW] Cleaned untracked files in main (excluding .auto-claude and .worktrees)');
+ console.log('[TASK_REVIEW] Cleaned untracked files in main (excluding .auto-claude)');
}
console.log('[TASK_REVIEW] Main branch restored to pre-merge state');
@@ -397,7 +400,7 @@ export function registerTaskExecutionHandlers(
// Write feedback for QA fixer - write to WORKTREE spec dir if it exists
// The QA process runs in the worktree where the build and implementation_plan.json are
- const targetSpecDir = hasWorktree ? worktreeSpecDir : specDir;
+ const targetSpecDir = hasWorktree && worktreeSpecDir ? worktreeSpecDir : specDir;
const fixRequestPath = path.join(targetSpecDir, 'QA_FIX_REQUEST.md');
console.warn('[TASK_REVIEW] Writing QA fix request to:', fixRequestPath);
@@ -453,9 +456,9 @@ export function registerTaskExecutionHandlers(
// Validate status transition - 'done' can only be set through merge handler
// UNLESS there's no worktree (limbo state - already merged/discarded or failed)
if (status === 'done') {
- // Check if worktree exists
- const worktreePath = path.join(project.path, '.worktrees', taskId);
- const hasWorktree = existsSync(worktreePath);
+ // Check if worktree exists (task.specId matches worktree folder name)
+ const worktreePath = findTaskWorktree(project.path, task.specId);
+ const hasWorktree = worktreePath !== null;
if (hasWorktree) {
// Worktree exists - must use merge workflow
@@ -508,11 +511,13 @@ export function registerTaskExecutionHandlers(
try {
// Use shared utility for thread-safe plan file updates
- const persisted = await persistPlanStatus(planPath, status);
+ const persisted = await persistPlanStatus(planPath, status, project.id);
if (!persisted) {
// If no implementation plan exists yet, create a basic one
await createPlanIfNotExists(planPath, task, status);
+ // Invalidate cache after creating new plan
+ projectStore.invalidateTasksCache(project.id);
}
// Auto-stop task when status changes AWAY from 'in_progress' and process IS running
@@ -585,7 +590,8 @@ export function registerTaskExecutionHandlers(
{
parallel: false,
workers: 1,
- baseBranch: baseBranchForUpdate
+ baseBranch: baseBranchForUpdate,
+ useWorktree: task.metadata?.useWorktree
}
);
} else {
@@ -599,7 +605,8 @@ export function registerTaskExecutionHandlers(
{
parallel: false,
workers: 1,
- baseBranch: baseBranchForUpdate
+ baseBranch: baseBranchForUpdate,
+ useWorktree: task.metadata?.useWorktree
}
);
}
@@ -671,17 +678,35 @@ export function registerTaskExecutionHandlers(
return { success: false, error: 'Task not found' };
}
- // Get the spec directory
- const autoBuildDir = project.autoBuildPath || '.auto-claude';
- const specDir = path.join(
+ // Get the spec directory - use task.specsPath if available (handles worktree vs main)
+ // This is critical: task might exist in worktree, and getTasks() prefers worktree version.
+ // If we write to main project but task is in worktree, the worktree's old status takes precedence on refresh.
+ const specDir = task.specsPath || path.join(
project.path,
- autoBuildDir,
- 'specs',
+ getSpecsDir(project.autoBuildPath),
task.specId
);
// Update implementation_plan.json
const planPath = path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN);
+ console.log(`[Recovery] Writing to plan file at: ${planPath} (task location: ${task.location || 'main'})`);
+
+ // Also update the OTHER location if task exists in both main and worktree
+ // This ensures consistency regardless of which version getTasks() prefers
+ const specsBaseDir = getSpecsDir(project.autoBuildPath);
+ const mainSpecDir = path.join(project.path, specsBaseDir, task.specId);
+ const worktreePath = findTaskWorktree(project.path, task.specId);
+ const worktreeSpecDir = worktreePath ? path.join(worktreePath, specsBaseDir, task.specId) : null;
+
+ // Collect all plan file paths that need updating
+ const planPathsToUpdate: string[] = [planPath];
+ if (mainSpecDir !== specDir && existsSync(path.join(mainSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN))) {
+ planPathsToUpdate.push(path.join(mainSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN));
+ }
+ if (worktreeSpecDir && worktreeSpecDir !== specDir && existsSync(path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN))) {
+ planPathsToUpdate.push(path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN));
+ }
+ console.log(`[Recovery] Will update ${planPathsToUpdate.length} plan file(s):`, planPathsToUpdate);
try {
// Read the plan to analyze subtask progress
@@ -743,14 +768,25 @@ export function registerTaskExecutionHandlers(
// Just update status in plan file (project store reads from file, no separate update needed)
plan.status = 'human_review';
plan.planStatus = 'review';
- try {
- // Use atomic write to prevent TOCTOU race conditions
- atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2));
- } catch (writeError) {
- console.error('[Recovery] Failed to write plan file:', writeError);
+
+ // Write to ALL plan file locations to ensure consistency
+ const planContent = JSON.stringify(plan, null, 2);
+ let writeSucceededForComplete = false;
+ for (const pathToUpdate of planPathsToUpdate) {
+ try {
+ atomicWriteFileSync(pathToUpdate, planContent);
+ console.log(`[Recovery] Successfully wrote to: ${pathToUpdate}`);
+ writeSucceededForComplete = true;
+ } catch (writeError) {
+ console.error(`[Recovery] Failed to write plan file at ${pathToUpdate}:`, writeError);
+ // Continue trying other paths
+ }
+ }
+
+ if (!writeSucceededForComplete) {
return {
success: false,
- error: 'Failed to write plan file'
+ error: 'Failed to write plan file during recovery (all locations failed)'
};
}
@@ -797,11 +833,19 @@ export function registerTaskExecutionHandlers(
}
}
- try {
- // Use atomic write to prevent TOCTOU race conditions
- atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2));
- } catch (writeError) {
- console.error('[Recovery] Failed to write plan file:', writeError);
+ // Write to ALL plan file locations to ensure consistency
+ const planContent = JSON.stringify(plan, null, 2);
+ let writeSucceeded = false;
+ for (const pathToUpdate of planPathsToUpdate) {
+ try {
+ atomicWriteFileSync(pathToUpdate, planContent);
+ console.log(`[Recovery] Successfully wrote to: ${pathToUpdate}`);
+ writeSucceeded = true;
+ } catch (writeError) {
+ console.error(`[Recovery] Failed to write plan file at ${pathToUpdate}:`, writeError);
+ }
+ }
+ if (!writeSucceeded) {
return {
success: false,
error: 'Failed to write plan file during recovery'
@@ -853,17 +897,20 @@ export function registerTaskExecutionHandlers(
// Set status to in_progress for the restart
newStatus = 'in_progress';
- // Update plan status for restart
+ // Update plan status for restart - write to ALL locations
if (plan) {
plan.status = 'in_progress';
plan.planStatus = 'in_progress';
- try {
- // Use atomic write to prevent TOCTOU race conditions
- atomicWriteFileSync(planPath, JSON.stringify(plan, null, 2));
- } catch (writeError) {
- console.error('[Recovery] Failed to write plan file for restart:', writeError);
- // Continue with restart attempt even if file write fails
- // The plan status will be updated by the agent when it starts
+ const restartPlanContent = JSON.stringify(plan, null, 2);
+ for (const pathToUpdate of planPathsToUpdate) {
+ try {
+ atomicWriteFileSync(pathToUpdate, restartPlanContent);
+ console.log(`[Recovery] Wrote restart status to: ${pathToUpdate}`);
+ } catch (writeError) {
+ console.error(`[Recovery] Failed to write plan file for restart at ${pathToUpdate}:`, writeError);
+ // Continue with restart attempt even if file write fails
+ // The plan status will be updated by the agent when it starts
+ }
}
}
@@ -896,7 +943,8 @@ export function registerTaskExecutionHandlers(
{
parallel: false,
workers: 1,
- baseBranch: baseBranchForRecovery
+ baseBranch: baseBranchForRecovery,
+ useWorktree: task.metadata?.useWorktree
}
);
}
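
The recovery paths above funnel every plan write through atomicWriteFileSync, whose body is not shown in this hunk. A minimal sketch of the write-temp-then-rename technique the comment describes (the helper name and exact behavior here are assumptions):

import { writeFileSync, renameSync, unlinkSync } from 'fs';

// Sketch: write to a sibling temp file, then rename over the target so readers
// never observe a partially written implementation_plan.json.
function atomicWriteFileSketch(targetPath: string, content: string): void {
  const tmpPath = `${targetPath}.tmp-${process.pid}`;
  writeFileSync(tmpPath, content);
  try {
    renameSync(tmpPath, targetPath); // atomic when tmp and target share a filesystem
  } catch (err) {
    try { unlinkSync(tmpPath); } catch { /* best-effort cleanup */ }
    throw err;
  }
}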
diff --git a/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts b/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts
index 6d810f3aea..77fbda79c7 100644
--- a/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/plan-file-utils.ts
@@ -21,6 +21,7 @@ import path from 'path';
import { readFileSync, writeFileSync, mkdirSync } from 'fs';
import { AUTO_BUILD_PATHS, getSpecsDir } from '../../../shared/constants';
import type { TaskStatus, Project, Task } from '../../../shared/types';
+import { projectStore } from '../../project-store';
// In-memory locks for plan file operations
// Key: plan file path, Value: Promise chain for serializing operations
@@ -80,6 +81,8 @@ export function mapStatusToPlanStatus(status: TaskStatus): string {
case 'ai_review':
case 'human_review':
return 'review';
+ case 'pr_created':
+ return 'pr_created';
case 'done':
return 'completed';
default:
@@ -93,11 +96,13 @@ export function mapStatusToPlanStatus(status: TaskStatus): string {
*
* @param planPath - Path to the implementation_plan.json file
* @param status - The TaskStatus to persist
+ * @param projectId - Optional project ID to invalidate cache (recommended for performance)
* @returns true if status was persisted, false if plan file doesn't exist
*/
-export async function persistPlanStatus(planPath: string, status: TaskStatus): Promise<boolean> {
+export async function persistPlanStatus(planPath: string, status: TaskStatus, projectId?: string): Promise<boolean> {
return withPlanLock(planPath, async () => {
try {
+ console.warn(`[plan-file-utils] Reading implementation_plan.json to update status to: ${status}`, { planPath });
// Read file directly without existence check to avoid TOCTOU race condition
const planContent = readFileSync(planPath, 'utf-8');
const plan = JSON.parse(planContent);
@@ -107,10 +112,18 @@ export async function persistPlanStatus(planPath: string, status: TaskStatus): P
plan.updated_at = new Date().toISOString();
writeFileSync(planPath, JSON.stringify(plan, null, 2));
+ console.warn(`[plan-file-utils] Successfully persisted status: ${status} to implementation_plan.json`);
+
+ // Invalidate tasks cache since status changed
+ if (projectId) {
+ projectStore.invalidateTasksCache(projectId);
+ }
+
return true;
} catch (err) {
// File not found is expected - return false
if (isFileNotFoundError(err)) {
+ console.warn(`[plan-file-utils] implementation_plan.json not found at ${planPath} - status not persisted`);
return false;
}
console.warn(`[plan-file-utils] Could not persist status to ${planPath}:`, err);
@@ -141,9 +154,10 @@ export async function persistPlanStatus(planPath: string, status: TaskStatus): P
*
* @param planPath - Path to the implementation_plan.json file
* @param status - The TaskStatus to persist
+ * @param projectId - Optional project ID to invalidate cache (recommended for performance)
* @returns true if status was persisted, false otherwise
*/
-export function persistPlanStatusSync(planPath: string, status: TaskStatus): boolean {
+export function persistPlanStatusSync(planPath: string, status: TaskStatus, projectId?: string): boolean {
try {
// Read file directly without existence check to avoid TOCTOU race condition
const planContent = readFileSync(planPath, 'utf-8');
@@ -154,6 +168,12 @@ export function persistPlanStatusSync(planPath: string, status: TaskStatus): boo
plan.updated_at = new Date().toISOString();
writeFileSync(planPath, JSON.stringify(plan, null, 2));
+
+ // Invalidate tasks cache since status changed
+ if (projectId) {
+ projectStore.invalidateTasksCache(projectId);
+ }
+
return true;
} catch (err) {
// File not found is expected - return false
@@ -178,6 +198,7 @@ export async function updatePlanFile<T extends Record<string, unknown>>(
): Promise<T | null> {
return withPlanLock(planPath, async () => {
try {
+ console.warn(`[plan-file-utils] Reading implementation_plan.json for update`, { planPath });
// Read file directly without existence check to avoid TOCTOU race condition
const planContent = readFileSync(planPath, 'utf-8');
const plan = JSON.parse(planContent) as T;
@@ -187,10 +208,12 @@ export async function updatePlanFile>(
(updatedPlan as Record<string, unknown>).updated_at = new Date().toISOString();
writeFileSync(planPath, JSON.stringify(updatedPlan, null, 2));
+ console.warn(`[plan-file-utils] Successfully updated implementation_plan.json`);
return updatedPlan;
} catch (err) {
// File not found is expected - return null
if (isFileNotFoundError(err)) {
+ console.warn(`[plan-file-utils] implementation_plan.json not found at ${planPath} - update skipped`);
return null;
}
console.warn(`[plan-file-utils] Could not update plan at ${planPath}:`, err);
@@ -247,3 +270,41 @@ export async function createPlanIfNotExists(
writeFileSync(planPath, JSON.stringify(plan, null, 2));
});
}
+
+/**
+ * Update task_metadata.json to add PR URL.
+ * This is a simple JSON file update (no locking needed as it's rarely updated concurrently).
+ *
+ * @param metadataPath - Path to the task_metadata.json file
+ * @param prUrl - The PR URL to add to metadata
+ * @returns true if metadata was updated, false if file doesn't exist or failed
+ */
+export function updateTaskMetadataPrUrl(metadataPath: string, prUrl: string): boolean {
+ try {
+ let metadata: Record<string, unknown> = {};
+
+ // Try to read existing metadata
+ try {
+ const content = readFileSync(metadataPath, 'utf-8');
+ metadata = JSON.parse(content);
+ } catch (err) {
+ if (!isFileNotFoundError(err)) {
+ throw err;
+ }
+ // File doesn't exist, will create new one
+ }
+
+ // Update with prUrl
+ metadata.prUrl = prUrl;
+
+ // Ensure parent directory exists before writing
+ mkdirSync(path.dirname(metadataPath), { recursive: true });
+
+ // Write back
+ writeFileSync(metadataPath, JSON.stringify(metadata, null, 2));
+ return true;
+ } catch (err) {
+ console.warn(`[plan-file-utils] Could not update metadata at ${metadataPath}:`, err);
+ return false;
+ }
+}
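
A short usage sketch of the two helpers added above (paths, project ID, and PR URL are illustrative):

// Persist a status change and invalidate the project's task cache in one call
// (from inside an async handler):
await persistPlanStatus(
  '/my/project/.auto-claude/specs/my-spec/implementation_plan.json',
  'in_progress',
  'project-123'
);

// Record a PR URL in task_metadata.json (the file is created if missing):
updateTaskMetadataPrUrl(
  '/my/project/.auto-claude/specs/my-spec/task_metadata.json',
  'https://github.com/example/repo/pull/42'
);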
diff --git a/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts
index a9edf89c6f..30e8ca520d 100644
--- a/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/task/worktree-handlers.ts
@@ -1,17 +1,37 @@
import { ipcMain, BrowserWindow, shell, app } from 'electron';
-import { IPC_CHANNELS, AUTO_BUILD_PATHS, DEFAULT_APP_SETTINGS, DEFAULT_FEATURE_MODELS, DEFAULT_FEATURE_THINKING, MODEL_ID_MAP, THINKING_BUDGET_MAP } from '../../../shared/constants';
-import type { IPCResult, WorktreeStatus, WorktreeDiff, WorktreeDiffFile, WorktreeMergeResult, WorktreeDiscardResult, WorktreeListResult, WorktreeListItem, SupportedIDE, SupportedTerminal, AppSettings } from '../../../shared/types';
+import { IPC_CHANNELS, AUTO_BUILD_PATHS, DEFAULT_APP_SETTINGS, DEFAULT_FEATURE_MODELS, DEFAULT_FEATURE_THINKING, MODEL_ID_MAP, THINKING_BUDGET_MAP, getSpecsDir } from '../../../shared/constants';
+import type { IPCResult, WorktreeStatus, WorktreeDiff, WorktreeDiffFile, WorktreeMergeResult, WorktreeDiscardResult, WorktreeListResult, WorktreeListItem, WorktreeCreatePROptions, WorktreeCreatePRResult, SupportedIDE, SupportedTerminal, AppSettings } from '../../../shared/types';
import path from 'path';
import { existsSync, readdirSync, statSync, readFileSync } from 'fs';
import { execSync, execFileSync, spawn, spawnSync, exec, execFile } from 'child_process';
+import { createRequire } from 'module';
+const require = createRequire(import.meta.url);
+const { minimatch } = require('minimatch');
import { projectStore } from '../../project-store';
import { getConfiguredPythonPath, PythonEnvManager, pythonEnvManager as pythonEnvManagerSingleton } from '../../python-env-manager';
-import { getEffectiveSourcePath } from '../../auto-claude-updater';
+import { getEffectiveSourcePath } from '../../updater/path-resolver';
import { getProfileEnv } from '../../rate-limit-detector';
import { findTaskAndProject } from './shared';
import { parsePythonCommand } from '../../python-detector';
import { getToolPath } from '../../cli-tool-manager';
import { promisify } from 'util';
+import {
+ getTaskWorktreeDir,
+ findTaskWorktree,
+} from '../../worktree-paths';
+import { persistPlanStatus, updateTaskMetadataPrUrl } from './plan-file-utils';
+
+// Regex pattern for validating git branch names
+const GIT_BRANCH_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9._/-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$/;
+
+// Maximum PR title length (GitHub's limit is 256 characters)
+const MAX_PR_TITLE_LENGTH = 256;
+
+// Regex for validating PR title contains only printable characters
+const PRINTABLE_CHARS_REGEX = /^[\x20-\x7E\u00A0-\uFFFF]*$/;
+
+// Timeout for PR creation operations (2 minutes for network operations)
+const PR_CREATION_TIMEOUT_MS = 120000;
/**
* Read utility feature settings (for commit message, merge resolver) from settings file
@@ -55,6 +75,145 @@ function getUtilitySettings(): { model: string; modelId: string; thinkingLevel:
const execAsync = promisify(exec);
const execFileAsync = promisify(execFile);
+/**
+ * Check if a repository is misconfigured as bare but has source files.
+ * If so, automatically fix the configuration by unsetting core.bare.
+ *
+ * This can happen when git worktree operations incorrectly set bare=true,
+ * or when users manually misconfigure the repository.
+ *
+ * @param projectPath - Path to check and potentially fix
+ * @returns true if fixed, false if no fix needed or not fixable
+ */
+function fixMisconfiguredBareRepo(projectPath: string): boolean {
+ try {
+ // Check if bare=true is set
+ const bareConfig = execFileSync(
+ getToolPath('git'),
+ ['config', '--get', 'core.bare'],
+ { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+ ).trim().toLowerCase();
+
+ if (bareConfig !== 'true') {
+ return false; // Not marked as bare, nothing to fix
+ }
+
+ // Check if there are source files (indicating misconfiguration)
+ // A truly bare repo would only have git internals, not source code
+ // This covers multiple ecosystems: JS/TS, Python, Rust, Go, Java, C#, etc.
+ //
+ // Markers are separated into exact matches and glob patterns for efficiency.
+ // Exact matches use existsSync() directly, while glob patterns use minimatch
+ // against a cached directory listing.
+ const EXACT_MARKERS = [
+ // JavaScript/TypeScript ecosystem
+ 'package.json', 'apps', 'src',
+ // Python ecosystem
+ 'pyproject.toml', 'setup.py', 'requirements.txt', 'Pipfile',
+ // Rust ecosystem
+ 'Cargo.toml',
+ // Go ecosystem
+ 'go.mod', 'go.sum', 'cmd', 'main.go',
+ // Java/JVM ecosystem
+ 'pom.xml', 'build.gradle', 'build.gradle.kts',
+ // Ruby ecosystem
+ 'Gemfile', 'Rakefile',
+ // PHP ecosystem
+ 'composer.json',
+ // General project markers
+ 'Makefile', 'CMakeLists.txt', 'README.md', 'LICENSE'
+ ];
+
+ const GLOB_MARKERS = [
+ // .NET/C# ecosystem - patterns that need glob matching
+ '*.csproj', '*.sln', '*.fsproj'
+ ];
+
+ // Check exact matches first (fast path)
+ const hasExactMatch = EXACT_MARKERS.some(marker =>
+ existsSync(path.join(projectPath, marker))
+ );
+
+ if (hasExactMatch) {
+ // Found a project marker, proceed to fix
+ } else {
+ // Check glob patterns - read directory once and cache for all patterns
+ let directoryFiles: string[] | null = null;
+ const MAX_FILES_TO_CHECK = 500; // Limit to avoid reading huge directories
+
+ const hasGlobMatch = GLOB_MARKERS.some(pattern => {
+ // Validate pattern - only support simple glob patterns for security
+ if (pattern.includes('..') || pattern.includes('/')) {
+ console.warn(`[GIT] Unsupported glob pattern ignored: ${pattern}`);
+ return false;
+ }
+
+ // Lazy-load directory listing, cached across patterns
+ if (directoryFiles === null) {
+ try {
+ const allFiles = readdirSync(projectPath);
+ // Limit to first N entries to avoid performance issues
+ directoryFiles = allFiles.slice(0, MAX_FILES_TO_CHECK);
+ if (allFiles.length > MAX_FILES_TO_CHECK) {
+ console.warn(`[GIT] Directory has ${allFiles.length} entries, checking only first ${MAX_FILES_TO_CHECK}`);
+ }
+ } catch (error) {
+ // Log the error for debugging instead of silently swallowing
+ console.warn(`[GIT] Failed to read directory ${projectPath}:`, error instanceof Error ? error.message : String(error));
+ directoryFiles = [];
+ }
+ }
+
+ // Use minimatch for proper glob pattern matching
+ return directoryFiles.some(file => minimatch(file, pattern, { nocase: true }));
+ });
+
+ if (!hasGlobMatch) {
+ return false; // Legitimately bare repo
+ }
+ }
+
+ // Fix the misconfiguration
+ console.warn('[GIT] Detected misconfigured bare repository with source files. Auto-fixing by unsetting core.bare...');
+ execFileSync(
+ getToolPath('git'),
+ ['config', '--unset', 'core.bare'],
+ { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+ );
+ console.warn('[GIT] Fixed: core.bare has been unset. Git operations should now work correctly.');
+ return true;
+ } catch {
+ return false;
+ }
+}
+
+/**
+ * Check if a path is a valid git working tree (not a bare repository).
+ * Returns true if the path is inside a git repository with a working tree.
+ *
+ * NOTE: This is a pure check with no side-effects. If you need to fix
+ * misconfigured bare repos before an operation, call fixMisconfiguredBareRepo()
+ * explicitly before calling this function.
+ *
+ * @param projectPath - Path to check
+ * @returns true if it's a valid working tree, false if bare or not a git repo
+ */
+function isGitWorkTree(projectPath: string): boolean {
+ try {
+ // Use git rev-parse --is-inside-work-tree which returns "true" for working trees
+ // and fails for bare repos or non-git directories
+ const result = execFileSync(
+ getToolPath('git'),
+ ['rev-parse', '--is-inside-work-tree'],
+ { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+ );
+ return result.trim() === 'true';
+ } catch {
+ // Not a working tree (could be bare repo or not a git repo at all)
+ return false;
+ }
+}
+
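A minimal sketch of the call order the NOTE above recommends (projectPath is illustrative):

// Repair a misdetected bare repo first (side-effecting), then run the pure check
// before any git status/diff calls:
const projectPath = '/path/to/project';
if (fixMisconfiguredBareRepo(projectPath)) {
  console.warn('[GIT] Repaired core.bare before continuing');
}
if (!isGitWorkTree(projectPath)) {
  console.warn('[GIT] Skipping git status checks for bare repository');
}
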
/**
* IDE and Terminal detection and launching utilities
*/
@@ -674,12 +833,14 @@ const TERMINAL_DETECTION: Partial
-/**
- * Escape single quotes in a path for AppleScript
- * Single quotes are escaped as: ' -> '\''
- */
-function escapeAppleScriptPath(dirPath: string): string {
+function escapeSingleQuotedPath(dirPath: string): string {
+ // Single quotes are escaped by ending the string, adding an escaped quote,
+ // and starting a new string: ' -> '\''
+ // This pattern works in both AppleScript and POSIX shells (bash, sh, zsh)
return dirPath.replace(/'/g, "'\\''");
}
@@ -1069,8 +1230,8 @@ async function openInTerminal(dirPath: string, terminal: SupportedTerminal, cust
if (platform === 'darwin') {
// macOS: Use open command with the directory
- // Escape single quotes in dirPath to prevent AppleScript injection
- const escapedPath = escapeAppleScriptPath(dirPath);
+ // Escape single quotes in dirPath to prevent script injection
+ const escapedPath = escapeSingleQuotedPath(dirPath);
if (terminal === 'system') {
// Use AppleScript to open Terminal.app at the directory
@@ -1112,7 +1273,7 @@ async function openInTerminal(dirPath: string, terminal: SupportedTerminal, cust
} catch {
// xterm doesn't have --working-directory, use -e with a script
// Escape the path for shell use within the xterm command
- const escapedPath = escapeAppleScriptPath(dirPath);
+ const escapedPath = escapeSingleQuotedPath(dirPath);
await execFileAsync('xterm', ['-e', `cd '${escapedPath}' && bash`]);
}
}
@@ -1139,7 +1300,10 @@ function getTaskBaseBranch(specDir: string): string | undefined {
if (existsSync(metadataPath)) {
const metadata = JSON.parse(readFileSync(metadataPath, 'utf-8'));
// Return baseBranch if explicitly set (not the __project_default__ marker)
- if (metadata.baseBranch && metadata.baseBranch !== '__project_default__') {
+ // Also validate it's a valid branch name to prevent malformed git commands
+ if (metadata.baseBranch &&
+ metadata.baseBranch !== '__project_default__' &&
+ GIT_BRANCH_REGEX.test(metadata.baseBranch)) {
return metadata.baseBranch;
}
}
@@ -1149,6 +1313,309 @@ function getTaskBaseBranch(specDir: string): string | undefined {
return undefined;
}
+/**
+ * Get the effective base branch for a task with proper fallback chain.
+ * Priority:
+ * 1. Task metadata baseBranch (explicit task-level override from task_metadata.json)
+ * 2. Project settings mainBranch (project-level default)
+ * 3. Git default branch detection (main/master)
+ * 4. Fallback to 'main'
+ *
+ * This should be used instead of getting the current HEAD branch,
+ * as the user may be on a feature branch when viewing worktree status.
+ */
+function getEffectiveBaseBranch(projectPath: string, specId: string, projectMainBranch?: string): string {
+ // 1. Try task metadata baseBranch
+ const specDir = path.join(projectPath, '.auto-claude', 'specs', specId);
+ const taskBaseBranch = getTaskBaseBranch(specDir);
+ if (taskBaseBranch) {
+ return taskBaseBranch;
+ }
+
+ // 2. Try project settings mainBranch
+ if (projectMainBranch && GIT_BRANCH_REGEX.test(projectMainBranch)) {
+ return projectMainBranch;
+ }
+
+ // 3. Try to detect main/master branch
+ for (const branch of ['main', 'master']) {
+ try {
+ execFileSync(getToolPath('git'), ['rev-parse', '--verify', branch], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ return branch;
+ } catch {
+ // Branch doesn't exist, try next
+ }
+ }
+
+ // 4. Fallback to 'main'
+ return 'main';
+}
+
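A usage sketch of the fallback chain (arguments are illustrative):

// Falls back task metadata -> project setting -> main/master detection -> 'main':
const baseBranch = getEffectiveBaseBranch('/path/to/project', 'my-spec', 'develop');
// Returns the task's stored baseBranch if set and valid, otherwise 'develop' (if it
// passes GIT_BRANCH_REGEX), otherwise the first of main/master that exists, otherwise 'main'.
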
+// ============================================
+// Helper functions for TASK_WORKTREE_CREATE_PR
+// ============================================
+
+/**
+ * Result of parsing JSON output from the create-pr Python script
+ */
+interface ParsedPRResult {
+ success: boolean;
+ prUrl?: string;
+ alreadyExists?: boolean;
+ error?: string;
+}
+
+/**
+ * Validate that a URL is a valid GitHub PR URL.
+ * Supports both github.com and GitHub Enterprise instances (custom domains).
+ * Only requires HTTPS protocol and non-empty hostname to allow any GH Enterprise URL.
+ * @returns true if the URL is a valid HTTPS URL with a non-empty hostname
+ */
+function isValidGitHubUrl(url: string): boolean {
+ try {
+ const parsed = new URL(url);
+ // Only require HTTPS with non-empty hostname
+ // This supports GH Enterprise instances with custom domains
+ // The URL comes from gh CLI output which we trust to be valid
+ return parsed.protocol === 'https:' && parsed.hostname.length > 0;
+ } catch {
+ return false;
+ }
+}
+
+/**
+ * Parse JSON output from the create-pr Python script
+ * Handles both snake_case and camelCase field names
+ * @returns ParsedPRResult if valid JSON found, null otherwise
+ */
+function parsePRJsonOutput(stdout: string): ParsedPRResult | null {
+ // Find the last complete JSON object in stdout (non-greedy, handles multiple objects)
+ const jsonMatches = stdout.match(/\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}/g);
+ const jsonMatch = jsonMatches && jsonMatches.length > 0 ? jsonMatches[jsonMatches.length - 1] : null;
+
+ if (!jsonMatch) {
+ return null;
+ }
+
+ try {
+ const parsed = JSON.parse(jsonMatch);
+
+ // Validate parsed JSON has expected shape
+ if (typeof parsed !== 'object' || parsed === null) {
+ return null;
+ }
+
+ // Extract and validate fields with proper type checking
+ // Handle both snake_case (from Python) and camelCase field names
+ // Default success to false to avoid masking failures when field is missing
+ const rawPrUrl = typeof parsed.pr_url === 'string' ? parsed.pr_url :
+ typeof parsed.prUrl === 'string' ? parsed.prUrl : undefined;
+
+ // Validate PR URL is a valid GitHub URL for robustness
+ const validatedPrUrl = rawPrUrl && isValidGitHubUrl(rawPrUrl) ? rawPrUrl : undefined;
+
+ return {
+ success: typeof parsed.success === 'boolean' ? parsed.success : false,
+ prUrl: validatedPrUrl,
+ alreadyExists: typeof parsed.already_exists === 'boolean' ? parsed.already_exists :
+ typeof parsed.alreadyExists === 'boolean' ? parsed.alreadyExists : undefined,
+ error: typeof parsed.error === 'string' ? parsed.error : undefined
+ };
+ } catch {
+ return null;
+ }
+}
+
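For illustration, the kind of script output this parser is meant to handle (values are made up):

// Progress text before the JSON is ignored; the last complete JSON object wins:
const stdout = 'Pushing branch...\n{"success": true, "pr_url": "https://github.com/example/repo/pull/7"}';
const parsed = parsePRJsonOutput(stdout);
// parsed => { success: true, prUrl: 'https://github.com/example/repo/pull/7',
//             alreadyExists: undefined, error: undefined }
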
+/**
+ * Result of updating task status after PR creation
+ */
+interface TaskStatusUpdateResult {
+ mainProjectStatus: boolean;
+ mainProjectMetadata: boolean;
+ worktreeStatus: boolean;
+ worktreeMetadata: boolean;
+}
+
+/**
+ * Update task status and metadata after PR creation
+ * Updates both main project and worktree locations
+ * @returns Result object indicating which updates succeeded/failed
+ */
+async function updateTaskStatusAfterPRCreation(
+ specDir: string,
+ worktreePath: string | null,
+ prUrl: string,
+ autoBuildPath: string | undefined,
+ specId: string,
+ debug: (...args: unknown[]) => void
+): Promise<TaskStatusUpdateResult> {
+ const result: TaskStatusUpdateResult = {
+ mainProjectStatus: false,
+ mainProjectMetadata: false,
+ worktreeStatus: false,
+ worktreeMetadata: false
+ };
+
+ const planPath = path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN);
+ const metadataPath = path.join(specDir, 'task_metadata.json');
+
+ // Await status persistence to ensure completion before resolving
+ try {
+ const persisted = await persistPlanStatus(planPath, 'pr_created');
+ result.mainProjectStatus = persisted;
+ debug('Main project status persisted to pr_created:', persisted);
+ } catch (err) {
+ debug('Failed to persist main project status:', err);
+ }
+
+ // Update metadata with prUrl in main project
+ result.mainProjectMetadata = updateTaskMetadataPrUrl(metadataPath, prUrl);
+ debug('Main project metadata updated with prUrl:', result.mainProjectMetadata);
+
+ // Also persist to WORKTREE location (worktree takes priority when loading tasks)
+ // This ensures the status persists after refresh since getTasks() prefers worktree version
+ if (worktreePath) {
+ const specsBaseDir = getSpecsDir(autoBuildPath);
+ const worktreePlanPath = path.join(worktreePath, specsBaseDir, specId, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN);
+ const worktreeMetadataPath = path.join(worktreePath, specsBaseDir, specId, 'task_metadata.json');
+
+ try {
+ const persisted = await persistPlanStatus(worktreePlanPath, 'pr_created');
+ result.worktreeStatus = persisted;
+ debug('Worktree status persisted to pr_created:', persisted);
+ } catch (err) {
+ debug('Failed to persist worktree status:', err);
+ }
+
+ result.worktreeMetadata = updateTaskMetadataPrUrl(worktreeMetadataPath, prUrl);
+ debug('Worktree metadata updated with prUrl:', result.worktreeMetadata);
+ }
+
+ return result;
+}
+
+/**
+ * Build arguments for the create-pr Python script
+ */
+function buildCreatePRArgs(
+ runScript: string,
+ specId: string,
+ projectPath: string,
+ options: WorktreeCreatePROptions | undefined,
+ taskBaseBranch: string | undefined
+): { args: string[]; validationError?: string } {
+ const args = [
+ runScript,
+ '--spec', specId,
+ '--project-dir', projectPath,
+ '--create-pr'
+ ];
+
+ // Add optional arguments with validation
+ if (options?.targetBranch) {
+ // Validate branch name to prevent malformed git commands
+ if (!GIT_BRANCH_REGEX.test(options.targetBranch)) {
+ return { args: [], validationError: 'Invalid target branch name' };
+ }
+ args.push('--pr-target', options.targetBranch);
+ }
+ if (options?.title) {
+ // Validate title for printable characters and length limit
+ if (options.title.length > MAX_PR_TITLE_LENGTH) {
+ return { args: [], validationError: `PR title exceeds maximum length of ${MAX_PR_TITLE_LENGTH} characters` };
+ }
+ if (!PRINTABLE_CHARS_REGEX.test(options.title)) {
+ return { args: [], validationError: 'PR title contains invalid characters' };
+ }
+ args.push('--pr-title', options.title);
+ }
+ if (options?.draft) {
+ args.push('--pr-draft');
+ }
+
+ // Add --base-branch if task was created with a specific base branch
+ if (taskBaseBranch) {
+ args.push('--base-branch', taskBaseBranch);
+ }
+
+ return { args };
+}
+
+/**
+ * Initialize Python environment for PR creation
+ * @returns Error message if initialization fails, undefined on success
+ */
+async function initializePythonEnvForPR(
+ pythonEnvManager: PythonEnvManager
+): Promise<string | undefined> {
+ if (pythonEnvManager.isEnvReady()) {
+ return undefined;
+ }
+
+ const autoBuildSource = getEffectiveSourcePath();
+ if (!autoBuildSource) {
+ return 'Python environment not ready and Auto Claude source not found';
+ }
+
+ const status = await pythonEnvManager.initialize(autoBuildSource);
+ if (!status.ready) {
+ return `Python environment not ready: ${status.error || 'Unknown error'}`;
+ }
+
+ return undefined;
+}
+
+/**
+ * Generic retry wrapper with exponential backoff
+ * @param operation - Async function to execute with retry
+ * @param options - Retry configuration options
+ * @returns Result of the operation or throws after all retries
+ */
+async function withRetry<T>(
+ operation: () => Promise<T>,
+ options: {
+ maxRetries?: number;
+ baseDelayMs?: number;
+ onRetry?: (attempt: number, error: unknown) => void;
+ shouldRetry?: (error: unknown) => boolean;
+ } = {}
+): Promise<T> {
+ const { maxRetries: rawMaxRetries = 3, baseDelayMs = 100, onRetry, shouldRetry } = options;
+
+ // Ensure at least one attempt is made (clamp to minimum of 1)
+ const maxRetries = Math.max(1, rawMaxRetries);
+
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
+ try {
+ return await operation();
+ } catch (error) {
+ const isLastAttempt = attempt === maxRetries;
+
+ // Check if we should retry this error
+ if (shouldRetry && !shouldRetry(error)) {
+ throw error;
+ }
+
+ if (isLastAttempt) {
+ throw error;
+ }
+
+ // Notify about retry
+ onRetry?.(attempt, error);
+
+ // Wait before retry (exponential backoff)
+ await new Promise(r => setTimeout(r, baseDelayMs * Math.pow(2, attempt - 1)));
+ }
+ }
+
+ // This should never be reached, but TypeScript needs it
+ throw new Error('Retry loop exited unexpectedly');
+}
+
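A usage sketch of withRetry (the git fetch and path shown here are illustrative, not the handler's actual call):

// Retry a transient network operation up to 3 times with 100ms/200ms/400ms backoff:
const fetchResult = await withRetry(
  () => execFileAsync(getToolPath('git'), ['fetch', 'origin'], { cwd: '/path/to/project' }),
  {
    maxRetries: 3,
    baseDelayMs: 100,
    onRetry: (attempt, err) => console.warn(`[PR] git fetch retry #${attempt}:`, err),
    shouldRetry: (err) => !(err instanceof TypeError) // give up on programming errors
  }
);
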
/**
* Register worktree management handlers
*/
@@ -1158,7 +1625,7 @@ export function registerWorktreeHandlers(
): void {
/**
* Get the worktree status for a task
- * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/
+ * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/
*/
ipcMain.handle(
IPC_CHANNELS.TASK_WORKTREE_STATUS,
@@ -1169,10 +1636,10 @@ export function registerWorktreeHandlers(
return { success: false, error: 'Task not found' };
}
- // Per-spec worktree path: .worktrees/{spec-name}/
- const worktreePath = path.join(project.path, '.worktrees', task.specId);
+ // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/
+ const worktreePath = findTaskWorktree(project.path, task.specId);
- if (!existsSync(worktreePath)) {
+ if (!worktreePath) {
return {
success: true,
data: { exists: false }
@@ -1187,17 +1654,10 @@ export function registerWorktreeHandlers(
encoding: 'utf-8'
}).trim();
- // Get base branch - the current branch in the main project (where changes will be merged)
- // This matches the Python merge logic which merges into the user's current branch
- let baseBranch = 'main';
- try {
- baseBranch = execFileSync(getToolPath('git'), ['rev-parse', '--abbrev-ref', 'HEAD'], {
- cwd: project.path,
- encoding: 'utf-8'
- }).trim();
- } catch {
- baseBranch = 'main';
- }
+ // Get base branch using proper fallback chain:
+ // 1. Task metadata baseBranch, 2. Project settings mainBranch, 3. main/master detection
+ // Note: We do NOT use current HEAD as that may be a feature branch
+ const baseBranch = getEffectiveBaseBranch(project.path, task.specId, project.settings?.mainBranch);
// Get commit count (cross-platform - no shell syntax)
let commitCount = 0;
@@ -1268,7 +1728,7 @@ export function registerWorktreeHandlers(
/**
* Get the diff for a task's worktree
- * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/
+ * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/
*/
ipcMain.handle(
IPC_CHANNELS.TASK_WORKTREE_DIFF,
@@ -1279,23 +1739,17 @@ export function registerWorktreeHandlers(
return { success: false, error: 'Task not found' };
}
- // Per-spec worktree path: .worktrees/{spec-name}/
- const worktreePath = path.join(project.path, '.worktrees', task.specId);
+ // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/
+ const worktreePath = findTaskWorktree(project.path, task.specId);
- if (!existsSync(worktreePath)) {
+ if (!worktreePath) {
return { success: false, error: 'No worktree found for this task' };
}
- // Get base branch - the current branch in the main project (where changes will be merged)
- let baseBranch = 'main';
- try {
- baseBranch = execFileSync(getToolPath('git'), ['rev-parse', '--abbrev-ref', 'HEAD'], {
- cwd: project.path,
- encoding: 'utf-8'
- }).trim();
- } catch {
- baseBranch = 'main';
- }
+ // Get base branch using proper fallback chain:
+ // 1. Task metadata baseBranch, 2. Project settings mainBranch, 3. main/master detection
+ // Note: We do NOT use current HEAD as that may be a feature branch
+ const baseBranch = getEffectiveBaseBranch(project.path, task.specId, project.settings?.mainBranch);
// Get the diff with file stats
const files: WorktreeDiffFile[] = [];
@@ -1370,14 +1824,15 @@ export function registerWorktreeHandlers(
ipcMain.handle(
IPC_CHANNELS.TASK_WORKTREE_MERGE,
async (_, taskId: string, options?: { noCommit?: boolean }): Promise<IPCResult<WorktreeMergeResult>> => {
- // Always log merge operations for debugging
+ const isDebugMode = process.env.DEBUG === 'true' || process.env.NODE_ENV === 'development';
const debug = (...args: unknown[]) => {
- console.warn('[MERGE DEBUG]', ...args);
+ if (isDebugMode) {
+ console.warn('[MERGE DEBUG]', ...args);
+ }
};
try {
- console.warn('[MERGE] Handler called with taskId:', taskId, 'options:', options);
- debug('Starting merge for taskId:', taskId, 'options:', options);
+ debug('Handler called with taskId:', taskId, 'options:', options);
// Ensure Python environment is ready
if (!pythonEnvManager.isEnvReady()) {
@@ -1400,6 +1855,12 @@ export function registerWorktreeHandlers(
debug('Found task:', task.specId, 'project:', project.path);
+ // Auto-fix any misconfigured bare repo before merge operation
+ // This prevents issues where git operations fail due to incorrect bare=true config
+ if (fixMisconfiguredBareRepo(project.path)) {
+ debug('Fixed misconfigured bare repository at:', project.path);
+ }
+
// Use run.py --merge to handle the merge
const sourcePath = getEffectiveSourcePath();
if (!sourcePath) {
@@ -1415,8 +1876,8 @@ export function registerWorktreeHandlers(
}
// Check worktree exists before merge
- const worktreePath = path.join(project.path, '.worktrees', task.specId);
- debug('Worktree path:', worktreePath, 'exists:', existsSync(worktreePath));
+ const worktreePath = findTaskWorktree(project.path, task.specId);
+ debug('Worktree path:', worktreePath, 'exists:', !!worktreePath);
// Check if changes are already staged (for stage-only mode)
if (options?.noCommit) {
@@ -1443,14 +1904,18 @@ export function registerWorktreeHandlers(
}
}
- // Get git status before merge
- try {
- const gitStatusBefore = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' });
- debug('Git status BEFORE merge in main project:\n', gitStatusBefore || '(clean)');
- const gitBranch = execFileSync(getToolPath('git'), ['branch', '--show-current'], { cwd: project.path, encoding: 'utf-8' }).trim();
- debug('Current branch:', gitBranch);
- } catch (e) {
- debug('Failed to get git status before:', e);
+ // Get git status before merge (only if project is a working tree, not a bare repo)
+ if (isGitWorkTree(project.path)) {
+ try {
+ const gitStatusBefore = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' });
+ debug('Git status BEFORE merge in main project:\n', gitStatusBefore || '(clean)');
+ const gitBranch = execFileSync(getToolPath('git'), ['branch', '--show-current'], { cwd: project.path, encoding: 'utf-8' }).trim();
+ debug('Current branch:', gitBranch);
+ } catch (e) {
+ debug('Failed to get git status before:', e);
+ }
+ } else {
+ debug('Project is a bare repository - skipping pre-merge git status check');
}
const args = [
@@ -1465,11 +1930,18 @@ export function registerWorktreeHandlers(
args.push('--no-commit');
}
- // Add --base-branch if task was created with a specific base branch
+ // Add --base-branch with proper priority:
+ // 1. Task metadata baseBranch (explicit task-level override)
+ // 2. Project settings mainBranch (project-level default)
+ // This matches the logic in execution-handlers.ts
const taskBaseBranch = getTaskBaseBranch(specDir);
- if (taskBaseBranch) {
- args.push('--base-branch', taskBaseBranch);
- debug('Using stored base branch:', taskBaseBranch);
+ const projectMainBranch = project.settings?.mainBranch;
+ const effectiveBaseBranch = taskBaseBranch || projectMainBranch;
+
+ if (effectiveBaseBranch) {
+ args.push('--base-branch', effectiveBaseBranch);
+ debug('Using base branch:', effectiveBaseBranch,
+ `(source: ${taskBaseBranch ? 'task metadata' : 'project settings'})`);
}
// Use configured Python path (venv if ready, otherwise bundled/system)
@@ -1594,14 +2066,18 @@ export function registerWorktreeHandlers(
debug('Full stdout:', stdout);
debug('Full stderr:', stderr);
- // Get git status after merge
- try {
- const gitStatusAfter = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' });
- debug('Git status AFTER merge in main project:\n', gitStatusAfter || '(clean)');
- const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' });
- debug('Staged changes:\n', gitDiffStaged || '(none)');
- } catch (e) {
- debug('Failed to get git status after:', e);
+ // Get git status after merge (only if project is a working tree, not a bare repo)
+ if (isGitWorkTree(project.path)) {
+ try {
+ const gitStatusAfter = execFileSync(getToolPath('git'), ['status', '--short'], { cwd: project.path, encoding: 'utf-8' });
+ debug('Git status AFTER merge in main project:\n', gitStatusAfter || '(clean)');
+ const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' });
+ debug('Staged changes:\n', gitDiffStaged || '(none)');
+ } catch (e) {
+ debug('Failed to get git status after:', e);
+ }
+ } else {
+ debug('Project is a bare repository - skipping git status check (this is normal for worktree-based projects)');
}
if (code === 0) {
@@ -1613,33 +2089,39 @@ export function registerWorktreeHandlers(
let mergeAlreadyCommitted = false;
if (isStageOnly) {
- try {
- const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' });
- hasActualStagedChanges = gitDiffStaged.trim().length > 0;
- debug('Stage-only verification: hasActualStagedChanges:', hasActualStagedChanges);
-
- if (!hasActualStagedChanges) {
- // Check if worktree branch was already merged (merge commit exists)
- const specBranch = `auto-claude/${task.specId}`;
- try {
- // Check if current branch contains all commits from spec branch
- // git merge-base --is-ancestor returns exit code 0 if true, 1 if false
- execFileSync(
- 'git',
- ['merge-base', '--is-ancestor', specBranch, 'HEAD'],
- { cwd: project.path, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
- );
- // If we reach here, the command succeeded (exit code 0) - branch is merged
- mergeAlreadyCommitted = true;
- debug('Merge already committed check:', mergeAlreadyCommitted);
- } catch {
- // Exit code 1 means not merged, or branch may not exist
- mergeAlreadyCommitted = false;
- debug('Could not check merge status, assuming not merged');
+ // Only check staged changes if project is a working tree (not bare repo)
+ if (isGitWorkTree(project.path)) {
+ try {
+ const gitDiffStaged = execFileSync(getToolPath('git'), ['diff', '--staged', '--stat'], { cwd: project.path, encoding: 'utf-8' });
+ hasActualStagedChanges = gitDiffStaged.trim().length > 0;
+ debug('Stage-only verification: hasActualStagedChanges:', hasActualStagedChanges);
+
+ if (!hasActualStagedChanges) {
+ // Check if worktree branch was already merged (merge commit exists)
+ const specBranch = `auto-claude/${task.specId}`;
+ try {
+ // Check if current branch contains all commits from spec branch
+ // git merge-base --is-ancestor returns exit code 0 if true, 1 if false
+ execFileSync(
+ getToolPath('git'),
+ ['merge-base', '--is-ancestor', specBranch, 'HEAD'],
+ { cwd: project.path, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+ );
+ // If we reach here, the command succeeded (exit code 0) - branch is merged
+ mergeAlreadyCommitted = true;
+ debug('Merge already committed check:', mergeAlreadyCommitted);
+ } catch {
+ // Exit code 1 means not merged, or branch may not exist
+ mergeAlreadyCommitted = false;
+ debug('Could not check merge status, assuming not merged');
+ }
}
+ } catch (e) {
+ debug('Failed to verify staged changes:', e);
}
- } catch (e) {
- debug('Failed to verify staged changes:', e);
+ } else {
+ // For bare repos, skip staging verification - merge happens in worktree
+ debug('Project is a bare repository - skipping staged changes verification');
}
}
@@ -1657,6 +2139,33 @@ export function registerWorktreeHandlers(
message = 'Changes were already merged and committed. Task marked as done.';
staged = false;
debug('Stage-only requested but merge already committed. Marking as done.');
+
+ // Clean up worktree since merge is complete (fixes #243)
+ // This is the same cleanup as the full merge path, needed because
+ // stageOnly defaults to true for human_review tasks
+ try {
+ if (worktreePath && existsSync(worktreePath)) {
+ execFileSync(getToolPath('git'), ['worktree', 'remove', '--force', worktreePath], {
+ cwd: project.path,
+ encoding: 'utf-8'
+ });
+ debug('Worktree cleaned up (already merged):', worktreePath);
+
+ // Also delete the task branch
+ const taskBranch = `auto-claude/${task.specId}`;
+ try {
+ execFileSync(getToolPath('git'), ['branch', '-D', taskBranch], {
+ cwd: project.path,
+ encoding: 'utf-8'
+ });
+ debug('Task branch deleted:', taskBranch);
+ } catch {
+ // Branch might not exist or already deleted
+ }
+ }
+ } catch (cleanupErr) {
+ debug('Worktree cleanup failed (non-fatal):', cleanupErr);
+ }
} else if (isStageOnly && !hasActualStagedChanges) {
// Stage-only was requested but no changes to stage (and not committed)
// This could mean nothing to merge or an error - keep in human_review for investigation
@@ -1677,6 +2186,33 @@ export function registerWorktreeHandlers(
planStatus = 'completed';
message = 'Changes merged successfully';
staged = false;
+
+ // Clean up worktree after successful full merge (fixes #243)
+ // This allows drag-to-Done workflow since TASK_UPDATE_STATUS blocks 'done' when worktree exists
+ try {
+ if (worktreePath && existsSync(worktreePath)) {
+ execFileSync(getToolPath('git'), ['worktree', 'remove', '--force', worktreePath], {
+ cwd: project.path,
+ encoding: 'utf-8'
+ });
+ debug('Worktree cleaned up after full merge:', worktreePath);
+
+ // Also delete the task branch since we merged successfully
+ const taskBranch = `auto-claude/${task.specId}`;
+ try {
+ execFileSync(getToolPath('git'), ['branch', '-D', taskBranch], {
+ cwd: project.path,
+ encoding: 'utf-8'
+ });
+ debug('Task branch deleted:', taskBranch);
+ } catch {
+ // Branch might not exist or already deleted
+ }
+ }
+ } catch (cleanupErr) {
+ debug('Worktree cleanup failed (non-fatal):', cleanupErr);
+ // Non-fatal - merge succeeded, cleanup can be done manually
+ }
}
debug('Merge result. isStageOnly:', isStageOnly, 'newStatus:', newStatus, 'staged:', staged);
@@ -1701,57 +2237,66 @@ export function registerWorktreeHandlers(
// Issue #243: We must update BOTH the main project's plan AND the worktree's plan (if it exists)
// because ProjectStore prefers the worktree version when deduplicating tasks.
// OPTIMIZATION: Use async I/O and parallel updates to prevent UI blocking
- const planPaths = [
+ // NOTE: The worktree has the same directory structure as the main project
+ const planPaths: { path: string; isMain: boolean }[] = [
{ path: path.join(specDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: true },
- { path: path.join(worktreePath, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: false }
];
+ // Add worktree plan path if worktree exists
+ if (worktreePath) {
+ const worktreeSpecDir = path.join(worktreePath, project.autoBuildPath || '.auto-claude', 'specs', task.specId);
+ planPaths.push({ path: path.join(worktreeSpecDir, AUTO_BUILD_PATHS.IMPLEMENTATION_PLAN), isMain: false });
+ }
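+ // Illustrative example (layout assumed): for a spec named "user-auth" this typically resolves to
+ // <project>/.auto-claude/worktrees/tasks/user-auth/.auto-claude/specs/user-auth/<IMPLEMENTATION_PLAN>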
const { promises: fsPromises } = require('fs');
- // Fire and forget - don't block the response on file writes
- // But add retry logic for transient failures and verification
+ // Update plan file with retry logic for transient failures
// Uses EAFP pattern (try/catch) instead of LBYL (existsSync check) to avoid TOCTOU race conditions
- const updatePlanWithRetry = async (planPath: string, isMain: boolean, maxRetries = 3) => {
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
- try {
- const planContent = await fsPromises.readFile(planPath, 'utf-8');
- const plan = JSON.parse(planContent);
- plan.status = newStatus;
- plan.planStatus = planStatus;
- plan.updated_at = new Date().toISOString();
- if (staged) {
- plan.stagedAt = new Date().toISOString();
- plan.stagedInMainProject = true;
- }
- await fsPromises.writeFile(planPath, JSON.stringify(plan, null, 2));
+ const updatePlanWithRetry = async (planPath: string, isMain: boolean): Promise<boolean> => {
+ // Helper to check if error is ENOENT (file not found)
+ const isFileNotFound = (err: unknown): boolean =>
+ !!(err && typeof err === 'object' && 'code' in err && err.code === 'ENOENT');
- // Verify the write succeeded by reading back
- const verifyContent = await fsPromises.readFile(planPath, 'utf-8');
- const verifyPlan = JSON.parse(verifyContent);
- if (verifyPlan.status === newStatus && verifyPlan.planStatus === planStatus) {
- return true; // Write verified
- }
- throw new Error('Write verification failed - status mismatch');
- } catch (persistError: unknown) {
- // File doesn't exist - nothing to update (not an error)
- if (persistError && typeof persistError === 'object' && 'code' in persistError && persistError.code === 'ENOENT') {
- return true;
- }
- const isLastAttempt = attempt === maxRetries;
- if (isLastAttempt) {
- // Only log error if main plan fails; worktree plan might legitimately be missing or read-only
- if (isMain) {
- console.error('Failed to persist task status to main plan after retries:', persistError);
- } else {
- debug('Failed to persist task status to worktree plan (non-critical):', persistError);
+ try {
+ await withRetry(
+ async () => {
+ const planContent = await fsPromises.readFile(planPath, 'utf-8');
+ const plan = JSON.parse(planContent);
+ plan.status = newStatus;
+ plan.planStatus = planStatus;
+ plan.updated_at = new Date().toISOString();
+ if (staged) {
+ plan.stagedAt = new Date().toISOString();
+ plan.stagedInMainProject = true;
}
- return false;
+ await fsPromises.writeFile(planPath, JSON.stringify(plan, null, 2));
+
+ // Verify the write succeeded by reading back
+ const verifyContent = await fsPromises.readFile(planPath, 'utf-8');
+ const verifyPlan = JSON.parse(verifyContent);
+ if (verifyPlan.status !== newStatus || verifyPlan.planStatus !== planStatus) {
+ throw new Error('Write verification failed - status mismatch');
+ }
+ },
+ {
+ maxRetries: 3,
+ baseDelayMs: 100,
+ shouldRetry: (err) => !isFileNotFound(err) // Don't retry if file doesn't exist
}
- // Wait before retry (exponential backoff: 100ms, 200ms, 400ms)
- await new Promise(r => setTimeout(r, 100 * Math.pow(2, attempt - 1)));
+ );
+ return true;
+ } catch (err) {
+ // File doesn't exist - nothing to update (not an error)
+ if (isFileNotFound(err)) {
+ return true;
}
+ // Only log error if main plan fails; worktree plan might legitimately be missing or read-only
+ if (isMain) {
+ console.error('Failed to persist task status to main plan after retries:', err);
+ } else {
+ debug('Failed to persist task status to worktree plan (non-critical):', err);
+ }
+ return false;
}
- return false;
};
const updatePlans = async () => {
@@ -1766,8 +2311,15 @@ export function registerWorktreeHandlers(
}
};
- // Run async updates without blocking the response
- updatePlans().catch(err => debug('Background plan update failed:', err));
+ // IMPORTANT: Wait for plan updates to complete before responding (fixes #243)
+ // Previously this was "fire and forget" which caused a race condition:
+ // resolve() would return before files were written, and UI refresh would read old status
+ try {
+ await updatePlans();
+ } catch (err) {
+ debug('Plan update failed:', err);
+ // Non-fatal: UI will still update, but status may not persist across refresh
+ }
const mainWindow = getMainWindow();
if (mainWindow) {
@@ -1785,8 +2337,17 @@ export function registerWorktreeHandlers(
}
});
} else {
- // Check if there were conflicts
- const hasConflicts = stdout.includes('conflict') || stderr.includes('conflict');
+ // Check if there were actual merge conflicts
+ // More specific patterns to avoid false positives from debug output like "files_with_conflicts: 0"
+ const conflictPatterns = [
+ /CONFLICT \(/i, // Git merge conflict marker
+ /merge conflict/i, // Explicit merge conflict message
+ /\bconflict detected\b/i, // Our own conflict detection message
+ /\bconflicts? found\b/i, // "conflicts found" or "conflict found"
+ /Automatic merge failed/i, // Git's automatic merge failure message
+ ];
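+ // Illustrative: output like "CONFLICT (content): Merge conflict in src/app.ts" matches these patterns,
+ // while benign debug lines such as "files_with_conflicts: 0" do not.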
+ const combinedOutput = stdout + stderr;
+ const hasConflicts = conflictPatterns.some(pattern => pattern.test(combinedOutput));
debug('Merge failed. hasConflicts:', hasConflicts);
resolve({
@@ -1863,27 +2424,31 @@ export function registerWorktreeHandlers(
}
console.warn('[IPC] Found task:', task.specId, 'project:', project.name);
- // Check for uncommitted changes in the main project
+ // Check for uncommitted changes in the main project (only if not a bare repo)
let hasUncommittedChanges = false;
let uncommittedFiles: string[] = [];
- try {
- const gitStatus = execFileSync(getToolPath('git'), ['status', '--porcelain'], {
- cwd: project.path,
- encoding: 'utf-8'
- });
+ if (isGitWorkTree(project.path)) {
+ try {
+ const gitStatus = execFileSync(getToolPath('git'), ['status', '--porcelain'], {
+ cwd: project.path,
+ encoding: 'utf-8'
+ });
- if (gitStatus && gitStatus.trim()) {
- // Parse the status output to get file names
- // Format: XY filename (where X and Y are status chars, then space, then filename)
- uncommittedFiles = gitStatus
- .split('\n')
- .filter(line => line.trim())
- .map(line => line.substring(3).trim()); // Skip 2 status chars + 1 space, trim any trailing whitespace
+ if (gitStatus && gitStatus.trim()) {
+ // Parse the status output to get file names
+ // Format: XY filename (where X and Y are status chars, then space, then filename)
+ uncommittedFiles = gitStatus
+ .split('\n')
+ .filter(line => line.trim())
+ .map(line => line.substring(3).trim()); // Skip 2 status chars + 1 space, trim any trailing whitespace
- hasUncommittedChanges = uncommittedFiles.length > 0;
+ hasUncommittedChanges = uncommittedFiles.length > 0;
+ }
+ } catch (e) {
+ console.error('[IPC] Failed to check git status:', e);
}
- } catch (e) {
- console.error('[IPC] Failed to check git status:', e);
+ } else {
+ console.warn('[IPC] Project is a bare repository - skipping uncommitted changes check');
}
const sourcePath = getEffectiveSourcePath();
@@ -1901,11 +2466,18 @@ export function registerWorktreeHandlers(
'--merge-preview'
];
- // Add --base-branch if task was created with a specific base branch
+ // Add --base-branch with proper priority:
+ // 1. Task metadata baseBranch (explicit task-level override)
+ // 2. Project settings mainBranch (project-level default)
+ // This matches the logic in execution-handlers.ts
const taskBaseBranch = getTaskBaseBranch(specDir);
- if (taskBaseBranch) {
- args.push('--base-branch', taskBaseBranch);
- console.warn('[IPC] Using stored base branch for preview:', taskBaseBranch);
+ const projectMainBranch = project.settings?.mainBranch;
+ const effectiveBaseBranch = taskBaseBranch || projectMainBranch;
+
+ if (effectiveBaseBranch) {
+ args.push('--base-branch', effectiveBaseBranch);
+ console.warn('[IPC] Using base branch for preview:', effectiveBaseBranch,
+ `(source: ${taskBaseBranch ? 'task metadata' : 'project settings'})`);
}
// Use configured Python path (venv if ready, otherwise bundled/system)
@@ -2012,7 +2584,7 @@ export function registerWorktreeHandlers(
/**
* Discard the worktree changes
- * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/
+ * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/
*/
ipcMain.handle(
IPC_CHANNELS.TASK_WORKTREE_DISCARD,
@@ -2023,10 +2595,10 @@ export function registerWorktreeHandlers(
return { success: false, error: 'Task not found' };
}
- // Per-spec worktree path: .worktrees/{spec-name}/
- const worktreePath = path.join(project.path, '.worktrees', task.specId);
+ // Find worktree at .auto-claude/worktrees/tasks/{spec-name}/
+ const worktreePath = findTaskWorktree(project.path, task.specId);
- if (!existsSync(worktreePath)) {
+ if (!worktreePath) {
return {
success: true,
data: {
@@ -2090,7 +2662,7 @@ export function registerWorktreeHandlers(
/**
* List all spec worktrees for a project
- * Per-spec architecture: Each spec has its own worktree at .worktrees/{spec-name}/
+ * Per-spec architecture: Each spec has its own worktree at .auto-claude/worktrees/tasks/{spec-name}/
*/
ipcMain.handle(
IPC_CHANNELS.TASK_LIST_WORKTREES,
@@ -2101,23 +2673,11 @@ export function registerWorktreeHandlers(
return { success: false, error: 'Project not found' };
}
- const worktreesDir = path.join(project.path, '.worktrees');
const worktrees: WorktreeListItem[] = [];
+ const worktreesDir = getTaskWorktreeDir(project.path);
- if (!existsSync(worktreesDir)) {
- return { success: true, data: { worktrees } };
- }
-
- // Get all directories in .worktrees
- const entries = readdirSync(worktreesDir);
- for (const entry of entries) {
- const entryPath = path.join(worktreesDir, entry);
- const stat = statSync(entryPath);
-
- // Skip worker directories and non-directories
- if (!stat.isDirectory() || entry.startsWith('worker-')) {
- continue;
- }
+ // Helper to process a single worktree entry
+ const processWorktreeEntry = (entry: string, entryPath: string) => {
try {
// Get branch info
@@ -2126,16 +2686,10 @@ export function registerWorktreeHandlers(
encoding: 'utf-8'
}).trim();
- // Get base branch - the current branch in the main project (where changes will be merged)
- let baseBranch = 'main';
- try {
- baseBranch = execFileSync(getToolPath('git'), ['rev-parse', '--abbrev-ref', 'HEAD'], {
- cwd: project.path,
- encoding: 'utf-8'
- }).trim();
- } catch {
- baseBranch = 'main';
- }
+ // Get base branch using proper fallback chain:
+ // 1. Task metadata baseBranch, 2. Project settings mainBranch, 3. main/master detection
+ // Note: We do NOT use current HEAD as that may be a feature branch
+ const baseBranch = getEffectiveBaseBranch(project.path, entry, project.settings?.mainBranch);
// Get commit count (cross-platform - no shell syntax)
let commitCount = 0;
@@ -2188,6 +2742,22 @@ export function registerWorktreeHandlers(
console.error(`Error getting info for worktree ${entry}:`, gitError);
// Skip this worktree if we can't get git info
}
+ };
+
+ // Scan worktrees directory
+ if (existsSync(worktreesDir)) {
+ const entries = readdirSync(worktreesDir);
+ for (const entry of entries) {
+ const entryPath = path.join(worktreesDir, entry);
+ try {
+ const stat = statSync(entryPath);
+ if (stat.isDirectory()) {
+ processWorktreeEntry(entry, entryPath);
+ }
+ } catch {
+ // Skip entries that can't be stat'd
+ }
+ }
}
return { success: true, data: { worktrees } };
@@ -2273,4 +2843,276 @@ export function registerWorktreeHandlers(
}
}
);
+
+ /**
+ * Create a Pull Request from the worktree branch
+ * Pushes the branch to origin and creates a GitHub PR using gh CLI
+ */
+ ipcMain.handle(
+ IPC_CHANNELS.TASK_WORKTREE_CREATE_PR,
+ async (_, taskId: string, options?: WorktreeCreatePROptions): Promise<IPCResult<{ success: boolean; prUrl?: string; error?: string; alreadyExists?: boolean }>> => {
+ const isDebugMode = process.env.DEBUG === 'true' || process.env.NODE_ENV === 'development';
+ const debug = (...args: unknown[]) => {
+ if (isDebugMode) {
+ console.warn('[CREATE_PR DEBUG]', ...args);
+ }
+ };
+
+ try {
+ debug('Handler called with taskId:', taskId, 'options:', options);
+
+ // Ensure Python environment is ready
+ const pythonEnvError = await initializePythonEnvForPR(pythonEnvManager);
+ if (pythonEnvError) {
+ return { success: false, error: pythonEnvError };
+ }
+
+ const { task, project } = findTaskAndProject(taskId);
+ if (!task || !project) {
+ debug('Task or project not found');
+ return { success: false, error: 'Task not found' };
+ }
+
+ debug('Found task:', task.specId, 'project:', project.path);
+
+ // Use run.py --create-pr to handle the PR creation
+ const sourcePath = getEffectiveSourcePath();
+ if (!sourcePath) {
+ return { success: false, error: 'Auto Claude source not found' };
+ }
+
+ const runScript = path.join(sourcePath, 'run.py');
+ const specDir = path.join(project.path, project.autoBuildPath || '.auto-claude', 'specs', task.specId);
+
+ // Use EAFP pattern - try to read specDir and catch ENOENT
+ try {
+ statSync(specDir);
+ } catch (err) {
+ if (err && typeof err === 'object' && 'code' in err && err.code === 'ENOENT') {
+ debug('Spec directory not found:', specDir);
+ return { success: false, error: 'Spec directory not found' };
+ }
+ throw err; // Re-throw unexpected errors
+ }
+
+ // Check worktree exists before creating PR
+ const worktreePath = findTaskWorktree(project.path, task.specId);
+ if (!worktreePath) {
+ debug('No worktree found for spec:', task.specId);
+ return { success: false, error: 'No worktree found for this task' };
+ }
+ debug('Worktree path:', worktreePath);
+
+ // Build arguments using helper function
+ const taskBaseBranch = getTaskBaseBranch(specDir);
+ const { args, validationError } = buildCreatePRArgs(
+ runScript,
+ task.specId,
+ project.path,
+ options,
+ taskBaseBranch
+ );
+ if (validationError) {
+ return { success: false, error: validationError };
+ }
+ if (taskBaseBranch) {
+ debug('Using stored base branch:', taskBaseBranch);
+ }
+
+ // Use configured Python path
+ const pythonPath = getConfiguredPythonPath();
+ debug('Running command:', pythonPath, args.join(' '));
+ debug('Working directory:', sourcePath);
+
+ // Get profile environment with OAuth token
+ const profileEnv = getProfileEnv();
+
+ return new Promise((resolve) => {
+ let timeoutId: NodeJS.Timeout | null = null;
+ let resolved = false;
+
+ // Get Python environment for bundled packages
+ const pythonEnv = pythonEnvManagerSingleton.getPythonEnv();
+
+ // Parse Python command to handle space-separated commands like "py -3"
+ const [pythonCommand, pythonBaseArgs] = parsePythonCommand(pythonPath);
+ const createPRProcess = spawn(pythonCommand, [...pythonBaseArgs, ...args], {
+ cwd: sourcePath,
+ env: {
+ ...process.env,
+ ...pythonEnv,
+ ...profileEnv,
+ PYTHONUNBUFFERED: '1',
+ PYTHONUTF8: '1'
+ },
+ stdio: ['ignore', 'pipe', 'pipe']
+ });
+
+ let stdout = '';
+ let stderr = '';
+
+ // Set up timeout to kill hung processes
+ timeoutId = setTimeout(() => {
+ if (!resolved) {
+ debug('TIMEOUT: Create PR process exceeded', PR_CREATION_TIMEOUT_MS, 'ms, killing...');
+ resolved = true;
+
+ // Platform-specific process termination with fallback
+ if (process.platform === 'win32') {
+ try {
+ createPRProcess.kill();
+ // Fallback: forcefully kill with taskkill if process ignores initial kill
+ if (createPRProcess.pid) {
+ setTimeout(() => {
+ try {
+ spawn('taskkill', ['/pid', createPRProcess.pid!.toString(), '/f', '/t'], {
+ stdio: 'ignore',
+ detached: true
+ }).unref();
+ } catch {
+ // Process may already be dead
+ }
+ }, 5000);
+ }
+ } catch {
+ // Process may already be dead
+ }
+ } else {
+ createPRProcess.kill('SIGTERM');
+ setTimeout(() => {
+ try {
+ createPRProcess.kill('SIGKILL');
+ } catch {
+ // Process may already be dead
+ }
+ }, 5000);
+ }
+
+ resolve({
+ success: false,
+ error: 'PR creation timed out. Check if the PR was created on GitHub.'
+ });
+ }
+ }, PR_CREATION_TIMEOUT_MS);
+
+ createPRProcess.stdout.on('data', (data: Buffer) => {
+ const chunk = data.toString();
+ stdout += chunk;
+ debug('STDOUT:', chunk);
+ });
+
+ createPRProcess.stderr.on('data', (data: Buffer) => {
+ const chunk = data.toString();
+ stderr += chunk;
+ debug('STDERR:', chunk);
+ });
+
+ /**
+ * Handle process exit - shared logic for both 'close' and 'exit' events.
+ * Parses JSON output, updates task status if PR was created, and resolves the promise.
+ *
+ * @param code - Process exit code (0 = success, non-zero = failure)
+ * @param eventSource - Which event triggered this ('close' or 'exit') for debug logging
+ */
+ const handleCreatePRProcessExit = async (code: number | null, eventSource: 'close' | 'exit'): Promise<void> => {
+ if (resolved) return;
+ resolved = true;
+ if (timeoutId) clearTimeout(timeoutId);
+
+ debug(`Process exited via ${eventSource} event with code:`, code);
+ debug('Full stdout:', stdout);
+ debug('Full stderr:', stderr);
+
+ if (code === 0) {
+ // Parse JSON output using helper function
+ const result = parsePRJsonOutput(stdout);
+ if (result) {
+ debug('Parsed result:', result);
+
+ // Only update task status if a NEW PR was created (not if it already exists)
+ if (result.success !== false && result.prUrl && !result.alreadyExists) {
+ await updateTaskStatusAfterPRCreation(
+ specDir,
+ worktreePath,
+ result.prUrl,
+ project.autoBuildPath,
+ task.specId,
+ debug
+ );
+ } else if (result.alreadyExists) {
+ debug('PR already exists, not updating task status');
+ }
+
+ resolve({
+ success: true,
+ data: {
+ success: result.success,
+ prUrl: result.prUrl,
+ error: result.error,
+ alreadyExists: result.alreadyExists
+ }
+ });
+ } else {
+ // No JSON found, but process succeeded
+ debug('No JSON in output, assuming success');
+ resolve({
+ success: true,
+ data: {
+ success: true,
+ prUrl: undefined
+ }
+ });
+ }
+ } else {
+ debug('Process failed with code:', code);
+
+ // Try to parse JSON from stdout even on failure
+ const result = parsePRJsonOutput(stdout);
+ if (result) {
+ debug('Parsed error result:', result);
+ resolve({
+ success: false,
+ error: result.error || 'Failed to create PR'
+ });
+ } else {
+ // Fallback to raw output if JSON parsing fails
+ // Prefer stdout over stderr since stderr often contains debug messages
+ resolve({
+ success: false,
+ error: stdout || stderr || 'Failed to create PR'
+ });
+ }
+ }
+ };
+
+ createPRProcess.on('close', (code: number | null) => {
+ handleCreatePRProcessExit(code, 'close');
+ });
+
+ // Also listen to 'exit' event in case 'close' doesn't fire
+ createPRProcess.on('exit', (code: number | null) => {
+ // Give close event a chance to fire first with complete output
+ setTimeout(() => handleCreatePRProcessExit(code, 'exit'), 100);
+ });
+
+ createPRProcess.on('error', (err: Error) => {
+ if (resolved) return;
+ resolved = true;
+ if (timeoutId) clearTimeout(timeoutId);
+ debug('Process spawn error:', err);
+ resolve({
+ success: false,
+ error: `Failed to run create-pr: ${err.message}`
+ });
+ });
+ });
+ } catch (error) {
+ console.error('[CREATE_PR] Exception in handler:', error);
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to create PR'
+ };
+ }
+ }
+ );
}
diff --git a/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts b/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts
index b76d136314..96edd3c437 100644
--- a/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts
+++ b/apps/frontend/src/main/ipc-handlers/terminal-handlers.ts
@@ -9,6 +9,7 @@ import { projectStore } from '../project-store';
import { terminalNameGenerator } from '../terminal-name-generator';
import { debugLog, debugError } from '../../shared/utils/debug-logger';
import { escapeShellArg, escapeShellArgWindows } from '../../shared/utils/shell-escape';
+import { getClaudeCliInvocationAsync } from '../claude-cli-utils';
/**
@@ -53,7 +54,10 @@ export function registerTerminalHandlers(
ipcMain.on(
IPC_CHANNELS.TERMINAL_INVOKE_CLAUDE,
(_, id: string, cwd?: string) => {
- terminalManager.invokeClaude(id, cwd);
+ // Use async version to avoid blocking main process during CLI detection
+ terminalManager.invokeClaudeAsync(id, cwd).catch((error) => {
+ console.error('[terminal-handlers] Failed to invoke Claude:', error);
+ });
}
);
@@ -76,6 +80,22 @@ export function registerTerminalHandlers(
}
);
+ // Set terminal title (user renamed terminal in renderer)
+ ipcMain.on(
+ IPC_CHANNELS.TERMINAL_SET_TITLE,
+ (_, id: string, title: string) => {
+ terminalManager.setTitle(id, title);
+ }
+ );
+
+ // Set terminal worktree config (user changed worktree association in renderer)
+ ipcMain.on(
+ IPC_CHANNELS.TERMINAL_SET_WORKTREE_CONFIG,
+ (_, id: string, config: import('../../shared/types').TerminalWorktreeConfig | undefined) => {
+ terminalManager.setWorktreeConfig(id, config);
+ }
+ );
+
// Claude profile management (multi-account support)
ipcMain.handle(
IPC_CHANNELS.CLAUDE_PROFILES_GET,
@@ -321,7 +341,15 @@ export function registerTerminalHandlers(
});
// Create a new terminal for the login process
- await terminalManager.create({ id: terminalId, cwd: homeDir });
+ const createResult = await terminalManager.create({ id: terminalId, cwd: homeDir });
+
+ // If terminal creation failed, return the error
+ if (!createResult.success) {
+ return {
+ success: false,
+ error: createResult.error || 'Failed to create terminal for authentication'
+ };
+ }
// Wait a moment for the terminal to initialize
await new Promise(resolve => setTimeout(resolve, 500));
@@ -329,20 +357,30 @@ export function registerTerminalHandlers(
// Build the login command with the profile's config dir
// Use platform-specific syntax and escaping for environment variables
let loginCommand: string;
+ const { command: claudeCmd, env: claudeEnv } = await getClaudeCliInvocationAsync();
+ const pathPrefix = claudeEnv.PATH
+ ? (process.platform === 'win32'
+ ? `set "PATH=${escapeShellArgWindows(claudeEnv.PATH)}" && `
+ : `export PATH=${escapeShellArg(claudeEnv.PATH)} && `)
+ : '';
+ const shellClaudeCmd = process.platform === 'win32'
+ ? `"${escapeShellArgWindows(claudeCmd)}"`
+ : escapeShellArg(claudeCmd);
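+ // Illustrative (paths assumed): on macOS with a non-default PATH this can produce e.g.
+ // export PATH='/opt/homebrew/bin:/usr/bin' && '/opt/homebrew/bin/claude' setup-token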
+
if (!profile.isDefault && profile.configDir) {
if (process.platform === 'win32') {
// SECURITY: Use Windows-specific escaping for cmd.exe
const escapedConfigDir = escapeShellArgWindows(profile.configDir);
// Windows cmd.exe syntax: set "VAR=value" with %VAR% for expansion
- loginCommand = `set "CLAUDE_CONFIG_DIR=${escapedConfigDir}" && echo Config dir: %CLAUDE_CONFIG_DIR% && claude setup-token`;
+ loginCommand = `${pathPrefix}set "CLAUDE_CONFIG_DIR=${escapedConfigDir}" && echo Config dir: %CLAUDE_CONFIG_DIR% && ${shellClaudeCmd} setup-token`;
} else {
// SECURITY: Use POSIX escaping for bash/zsh
const escapedConfigDir = escapeShellArg(profile.configDir);
// Unix/Mac bash/zsh syntax: export VAR=value with $VAR for expansion
- loginCommand = `export CLAUDE_CONFIG_DIR=${escapedConfigDir} && echo "Config dir: $CLAUDE_CONFIG_DIR" && claude setup-token`;
+ loginCommand = `${pathPrefix}export CLAUDE_CONFIG_DIR=${escapedConfigDir} && echo "Config dir: $CLAUDE_CONFIG_DIR" && ${shellClaudeCmd} setup-token`;
}
} else {
- loginCommand = 'claude setup-token';
+ loginCommand = `${pathPrefix}${shellClaudeCmd} setup-token`;
}
debugLog('[IPC] Sending login command to terminal:', loginCommand);
@@ -350,10 +388,11 @@ export function registerTerminalHandlers(
// Write the login command to the terminal
terminalManager.write(terminalId, `${loginCommand}\r`);
- // Notify the renderer that a login terminal was created
+ // Notify the renderer that an auth terminal was created
+ // This allows the UI to display the terminal so users can see the OAuth flow
const mainWindow = getMainWindow();
if (mainWindow) {
- mainWindow.webContents.send('claude-profile-login-terminal', {
+ mainWindow.webContents.send(IPC_CHANNELS.TERMINAL_AUTH_CREATED, {
terminalId,
profileId,
profileName: profile.name
@@ -599,7 +638,21 @@ export function registerTerminalHandlers(
ipcMain.on(
IPC_CHANNELS.TERMINAL_RESUME_CLAUDE,
(_, id: string, sessionId?: string) => {
- terminalManager.resumeClaude(id, sessionId);
+ // Use async version to avoid blocking main process during CLI detection
+ terminalManager.resumeClaudeAsync(id, sessionId).catch((error) => {
+ console.error('[terminal-handlers] Failed to resume Claude:', error);
+ });
+ }
+ );
+
+ // Activate deferred Claude resume when terminal becomes active
+ // This is triggered by the renderer when a terminal with pendingClaudeResume becomes the active tab
+ ipcMain.on(
+ IPC_CHANNELS.TERMINAL_ACTIVATE_DEFERRED_RESUME,
+ (_, id: string) => {
+ terminalManager.activateDeferredResume(id).catch((error) => {
+ console.error('[terminal-handlers] Failed to activate deferred Claude resume:', error);
+ });
}
);
diff --git a/apps/frontend/src/main/ipc-handlers/terminal/index.ts b/apps/frontend/src/main/ipc-handlers/terminal/index.ts
new file mode 100644
index 0000000000..3b235fe038
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/terminal/index.ts
@@ -0,0 +1,17 @@
+/**
+ * Terminal handlers module
+ *
+ * This module organizes terminal worktree-related IPC handlers:
+ * - Worktree operations (create, list, remove)
+ */
+
+import { registerTerminalWorktreeHandlers } from './worktree-handlers';
+
+/**
+ * Register all terminal worktree IPC handlers
+ */
+export function registerTerminalWorktreeIpcHandlers(): void {
+ registerTerminalWorktreeHandlers();
+}
+
+export { registerTerminalWorktreeHandlers } from './worktree-handlers';
diff --git a/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts b/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts
new file mode 100644
index 0000000000..ca91fd70fb
--- /dev/null
+++ b/apps/frontend/src/main/ipc-handlers/terminal/worktree-handlers.ts
@@ -0,0 +1,560 @@
+import { ipcMain } from 'electron';
+import { IPC_CHANNELS } from '../../../shared/constants';
+import type {
+ IPCResult,
+ CreateTerminalWorktreeRequest,
+ TerminalWorktreeConfig,
+ TerminalWorktreeResult,
+} from '../../../shared/types';
+import path from 'path';
+import { existsSync, mkdirSync, writeFileSync, readFileSync, readdirSync, rmSync } from 'fs';
+import { execFileSync } from 'child_process';
+import { minimatch } from 'minimatch';
+import { debugLog, debugError } from '../../../shared/utils/debug-logger';
+import { projectStore } from '../../project-store';
+import { parseEnvFile } from '../utils';
+import {
+ getTerminalWorktreeDir,
+ getTerminalWorktreePath,
+ getTerminalWorktreeMetadataDir,
+ getTerminalWorktreeMetadataPath,
+} from '../../worktree-paths';
+
+// Shared validation regex for worktree names - lowercase alphanumeric with dashes/underscores
+// Must start and end with alphanumeric character
+const WORKTREE_NAME_REGEX = /^[a-z0-9][a-z0-9_-]*[a-z0-9]$|^[a-z0-9]$/;
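+// e.g. "feat-login", "db_migration_2", and "x" pass; "Feature", "-start", and "trailing-" are rejected.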
+
+// Validation regex for git branch names - allows alphanumeric, dots, slashes, dashes, underscores
+const GIT_BRANCH_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9._/-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$/;
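+// e.g. "main", "release/1.2.0", and "feature/login-form" pass; "-bad", "bad-", and names with spaces are rejected.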
+
+/**
+ * Fix repositories that are incorrectly marked with core.bare=true.
+ * This can happen when git worktree operations incorrectly set bare=true
+ * on a working repository that has source files.
+ *
+ * Returns true if a fix was applied, false otherwise.
+ */
+function fixMisconfiguredBareRepo(projectPath: string): boolean {
+ try {
+ // Check if bare=true is set
+ const bareConfig = execFileSync(
+ 'git',
+ ['config', '--get', 'core.bare'],
+ { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+ ).trim().toLowerCase();
+
+ if (bareConfig !== 'true') {
+ return false; // Not marked as bare, nothing to fix
+ }
+
+ // Check if there are source files (indicating misconfiguration)
+ // A truly bare repo would only have git internals, not source code
+ // This covers multiple ecosystems: JS/TS, Python, Rust, Go, Java, C#, etc.
+ const EXACT_MARKERS = [
+ // JavaScript/TypeScript ecosystem
+ 'package.json', 'apps', 'src',
+ // Python ecosystem
+ 'pyproject.toml', 'setup.py', 'requirements.txt', 'Pipfile',
+ // Rust ecosystem
+ 'Cargo.toml',
+ // Go ecosystem
+ 'go.mod', 'go.sum', 'cmd', 'main.go',
+ // Java/JVM ecosystem
+ 'pom.xml', 'build.gradle', 'build.gradle.kts',
+ // Ruby ecosystem
+ 'Gemfile', 'Rakefile',
+ // PHP ecosystem
+ 'composer.json',
+ // General project markers
+ 'Makefile', 'CMakeLists.txt', 'README.md', 'LICENSE'
+ ];
+
+ const GLOB_MARKERS = [
+ // .NET/C# ecosystem - patterns that need glob matching
+ '*.csproj', '*.sln', '*.fsproj'
+ ];
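+ // Illustrative: a repo containing "MyApp.csproj" or "Solution.sln" still counts as a working
+ // repository via these glob patterns even though no exact marker file is present.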
+
+ // Check exact matches first (fast path)
+ const hasExactMatch = EXACT_MARKERS.some(marker =>
+ existsSync(path.join(projectPath, marker))
+ );
+
+ if (hasExactMatch) {
+ // Found a project marker, proceed to fix
+ } else {
+ // Check glob patterns - read directory once and cache for all patterns
+ let directoryFiles: string[] | null = null;
+ const MAX_FILES_TO_CHECK = 500;
+
+ const hasGlobMatch = GLOB_MARKERS.some(pattern => {
+ // Validate pattern - only support simple glob patterns for security
+ if (pattern.includes('..') || pattern.includes('/')) {
+ debugLog('[TerminalWorktree] Unsupported glob pattern ignored:', pattern);
+ return false;
+ }
+
+ // Lazy-load directory listing, cached across patterns
+ if (directoryFiles === null) {
+ try {
+ const allFiles = readdirSync(projectPath);
+ directoryFiles = allFiles.slice(0, MAX_FILES_TO_CHECK);
+ if (allFiles.length > MAX_FILES_TO_CHECK) {
+ debugLog(`[TerminalWorktree] Directory has ${allFiles.length} entries, checking only first ${MAX_FILES_TO_CHECK}`);
+ }
+ } catch (error) {
+ debugError('[TerminalWorktree] Failed to read directory:', error);
+ directoryFiles = [];
+ }
+ }
+
+ // Use minimatch for proper glob pattern matching
+ return directoryFiles.some(file => minimatch(file, pattern, { nocase: true }));
+ });
+
+ if (!hasGlobMatch) {
+ return false; // Legitimately bare repo
+ }
+ }
+
+ // Fix the misconfiguration
+ debugLog('[TerminalWorktree] Detected misconfigured bare repository with source files. Auto-fixing by unsetting core.bare...');
+ execFileSync(
+ 'git',
+ ['config', '--unset', 'core.bare'],
+ { cwd: projectPath, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
+ );
+ debugLog('[TerminalWorktree] Fixed: core.bare has been unset. Git operations should now work correctly.');
+ return true;
+ } catch {
+ return false;
+ }
+}
+
+/**
+ * Validate that projectPath is a registered project
+ */
+function isValidProjectPath(projectPath: string): boolean {
+ const projects = projectStore.getProjects();
+ return projects.some(p => p.path === projectPath);
+}
+
+const MAX_TERMINAL_WORKTREES = 12;
+
+/**
+ * Get the default branch from project settings OR env config
+ */
+function getDefaultBranch(projectPath: string): string {
+ const project = projectStore.getProjects().find(p => p.path === projectPath);
+ if (project?.settings?.mainBranch) {
+ debugLog('[TerminalWorktree] Using mainBranch from project settings:', project.settings.mainBranch);
+ return project.settings.mainBranch;
+ }
+
+ const envPath = path.join(projectPath, '.auto-claude', '.env');
+ if (existsSync(envPath)) {
+ try {
+ const content = readFileSync(envPath, 'utf-8');
+ const vars = parseEnvFile(content);
+ if (vars['DEFAULT_BRANCH']) {
+ debugLog('[TerminalWorktree] Using DEFAULT_BRANCH from env config:', vars['DEFAULT_BRANCH']);
+ return vars['DEFAULT_BRANCH'];
+ }
+ } catch (error) {
+ debugError('[TerminalWorktree] Error reading env file:', error);
+ }
+ }
+
+ for (const branch of ['main', 'master']) {
+ try {
+ execFileSync('git', ['rev-parse', '--verify', branch], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Auto-detected branch:', branch);
+ return branch;
+ } catch {
+ // Branch doesn't exist, try next
+ }
+ }
+
+ // Fallback to current branch - wrap in try-catch
+ try {
+ const currentBranch = execFileSync('git', ['rev-parse', '--abbrev-ref', 'HEAD'], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ }).trim();
+ debugLog('[TerminalWorktree] Falling back to current branch:', currentBranch);
+ return currentBranch;
+ } catch (error) {
+ debugError('[TerminalWorktree] Error detecting current branch:', error);
+ return 'main'; // Safe default
+ }
+}
+
+function saveWorktreeConfig(projectPath: string, name: string, config: TerminalWorktreeConfig): void {
+ const metadataDir = getTerminalWorktreeMetadataDir(projectPath);
+ mkdirSync(metadataDir, { recursive: true });
+ const metadataPath = getTerminalWorktreeMetadataPath(projectPath, name);
+ writeFileSync(metadataPath, JSON.stringify(config, null, 2));
+}
+
+function loadWorktreeConfig(projectPath: string, name: string): TerminalWorktreeConfig | null {
+ // Check new metadata location first
+ const metadataPath = getTerminalWorktreeMetadataPath(projectPath, name);
+ if (existsSync(metadataPath)) {
+ try {
+ return JSON.parse(readFileSync(metadataPath, 'utf-8'));
+ } catch (error) {
+ debugError('[TerminalWorktree] Corrupted config at:', metadataPath, error);
+ return null;
+ }
+ }
+
+ // Backwards compatibility: check legacy location inside worktree
+ const legacyConfigPath = path.join(getTerminalWorktreePath(projectPath, name), 'config.json');
+ if (existsSync(legacyConfigPath)) {
+ try {
+ const config = JSON.parse(readFileSync(legacyConfigPath, 'utf-8'));
+ // Migrate to new location
+ saveWorktreeConfig(projectPath, name, config);
+ // Clean up legacy file
+ try {
+ rmSync(legacyConfigPath);
+ debugLog('[TerminalWorktree] Migrated config from legacy location:', name);
+ } catch {
+ debugLog('[TerminalWorktree] Could not remove legacy config:', legacyConfigPath);
+ }
+ return config;
+ } catch (error) {
+ debugError('[TerminalWorktree] Corrupted legacy config at:', legacyConfigPath, error);
+ return null;
+ }
+ }
+
+ return null;
+}
+
+async function createTerminalWorktree(
+ request: CreateTerminalWorktreeRequest
+): Promise<TerminalWorktreeResult> {
+ const { terminalId, name, taskId, createGitBranch, projectPath, baseBranch: customBaseBranch } = request;
+
+ debugLog('[TerminalWorktree] Creating worktree:', { name, taskId, createGitBranch, projectPath, customBaseBranch });
+
+ // Validate projectPath against registered projects
+ if (!isValidProjectPath(projectPath)) {
+ return {
+ success: false,
+ error: 'Invalid project path',
+ };
+ }
+
+ // Validate worktree name - use shared regex (lowercase only)
+ if (!WORKTREE_NAME_REGEX.test(name)) {
+ return {
+ success: false,
+ error: 'Invalid worktree name. Use lowercase letters, numbers, dashes, and underscores. Must start and end with alphanumeric.',
+ };
+ }
+
+ // CRITICAL: Validate customBaseBranch to prevent command injection
+ if (customBaseBranch && !GIT_BRANCH_REGEX.test(customBaseBranch)) {
+ return {
+ success: false,
+ error: 'Invalid base branch name',
+ };
+ }
+
+ const existing = await listTerminalWorktrees(projectPath);
+ if (existing.length >= MAX_TERMINAL_WORKTREES) {
+ return {
+ success: false,
+ error: `Maximum of ${MAX_TERMINAL_WORKTREES} terminal worktrees reached.`,
+ };
+ }
+
+ // Auto-fix any misconfigured bare repo before worktree operations
+ // This prevents crashes when git worktree operations have incorrectly set bare=true
+ if (fixMisconfiguredBareRepo(projectPath)) {
+ debugLog('[TerminalWorktree] Fixed misconfigured bare repository at:', projectPath);
+ }
+
+ const worktreePath = getTerminalWorktreePath(projectPath, name);
+ const branchName = `terminal/${name}`;
+ let directoryCreated = false;
+
+ try {
+ if (existsSync(worktreePath)) {
+ return { success: false, error: `Worktree '${name}' already exists.` };
+ }
+
+ mkdirSync(getTerminalWorktreeDir(projectPath), { recursive: true });
+ directoryCreated = true;
+
+ // Use custom base branch if provided, otherwise detect default
+ const baseBranch = customBaseBranch || getDefaultBranch(projectPath);
+ debugLog('[TerminalWorktree] Using base branch:', baseBranch, customBaseBranch ? '(custom)' : '(default)');
+
+ // Check if baseBranch is already a remote ref (e.g., "origin/feature-x")
+ const isRemoteRef = baseBranch.startsWith('origin/');
+ const remoteBranchName = isRemoteRef ? baseBranch.replace('origin/', '') : baseBranch;
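+ // Illustrative: baseBranch "origin/release-1.2" fetches "release-1.2"; baseBranch "develop" fetches "develop".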
+
+ // Fetch the branch from remote
+ try {
+ execFileSync('git', ['fetch', 'origin', remoteBranchName], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Fetched latest from origin/' + remoteBranchName);
+ } catch {
+ debugLog('[TerminalWorktree] Could not fetch from remote, continuing with local branch');
+ }
+
+ // Determine the base ref to use for worktree creation
+ let baseRef = baseBranch;
+ if (isRemoteRef) {
+ // Already a remote ref, use as-is
+ baseRef = baseBranch;
+ debugLog('[TerminalWorktree] Using remote ref directly:', baseRef);
+ } else {
+ // Check if remote version exists and use it for latest code
+ try {
+ execFileSync('git', ['rev-parse', '--verify', `origin/${baseBranch}`], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ baseRef = `origin/${baseBranch}`;
+ debugLog('[TerminalWorktree] Using remote ref:', baseRef);
+ } catch {
+ debugLog('[TerminalWorktree] Remote ref not found, using local branch:', baseBranch);
+ }
+ }
+
+ if (createGitBranch) {
+ execFileSync('git', ['worktree', 'add', '-b', branchName, worktreePath, baseRef], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Created worktree with branch:', branchName, 'from', baseRef);
+ } else {
+ execFileSync('git', ['worktree', 'add', '--detach', worktreePath, baseRef], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Created worktree in detached HEAD mode from', baseRef);
+ }
+
+ const config: TerminalWorktreeConfig = {
+ name,
+ worktreePath,
+ branchName: createGitBranch ? branchName : '',
+ baseBranch,
+ hasGitBranch: createGitBranch,
+ taskId,
+ createdAt: new Date().toISOString(),
+ terminalId,
+ };
+
+ saveWorktreeConfig(projectPath, name, config);
+ debugLog('[TerminalWorktree] Saved config for worktree:', name);
+
+ return { success: true, config };
+ } catch (error) {
+ debugError('[TerminalWorktree] Error creating worktree:', error);
+
+ // Cleanup: remove the worktree directory if git worktree creation failed
+ if (directoryCreated && existsSync(worktreePath)) {
+ try {
+ rmSync(worktreePath, { recursive: true, force: true });
+ debugLog('[TerminalWorktree] Cleaned up failed worktree directory:', worktreePath);
+ // Also prune stale worktree registrations in case git worktree add partially succeeded
+ try {
+ execFileSync('git', ['worktree', 'prune'], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Pruned stale worktree registrations');
+ } catch {
+ // Ignore prune errors - not critical
+ }
+ } catch (cleanupError) {
+ debugError('[TerminalWorktree] Failed to cleanup worktree directory:', cleanupError);
+ }
+ }
+
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to create worktree',
+ };
+ }
+}
+
+async function listTerminalWorktrees(projectPath: string): Promise<TerminalWorktreeConfig[]> {
+ // Validate projectPath against registered projects
+ if (!isValidProjectPath(projectPath)) {
+ debugError('[TerminalWorktree] Invalid project path for listing:', projectPath);
+ return [];
+ }
+
+ const configs: TerminalWorktreeConfig[] = [];
+ const seenNames = new Set<string>();
+
+ // Scan new metadata directory
+ const metadataDir = getTerminalWorktreeMetadataDir(projectPath);
+ if (existsSync(metadataDir)) {
+ try {
+ for (const file of readdirSync(metadataDir, { withFileTypes: true })) {
+ if (file.isFile() && file.name.endsWith('.json')) {
+ const name = file.name.replace('.json', '');
+ const config = loadWorktreeConfig(projectPath, name);
+ if (config) {
+ configs.push(config);
+ seenNames.add(name);
+ }
+ }
+ }
+ } catch (error) {
+ debugError('[TerminalWorktree] Error scanning metadata dir:', error);
+ }
+ }
+
+ // Also scan worktree directory for legacy configs (will be migrated on load)
+ const worktreeDir = getTerminalWorktreeDir(projectPath);
+ if (existsSync(worktreeDir)) {
+ try {
+ for (const dir of readdirSync(worktreeDir, { withFileTypes: true })) {
+ if (dir.isDirectory() && !seenNames.has(dir.name)) {
+ const config = loadWorktreeConfig(projectPath, dir.name);
+ if (config) {
+ configs.push(config);
+ }
+ }
+ }
+ } catch (error) {
+ debugError('[TerminalWorktree] Error scanning worktree dir:', error);
+ }
+ }
+
+ return configs;
+}
+
+async function removeTerminalWorktree(
+ projectPath: string,
+ name: string,
+ deleteBranch: boolean = false
+): Promise<TerminalWorktreeResult> {
+ debugLog('[TerminalWorktree] Removing worktree:', { name, deleteBranch, projectPath });
+
+ // Validate projectPath against registered projects
+ if (!isValidProjectPath(projectPath)) {
+ return { success: false, error: 'Invalid project path' };
+ }
+
+ // Validate worktree name to prevent path traversal
+ if (!WORKTREE_NAME_REGEX.test(name)) {
+ return { success: false, error: 'Invalid worktree name' };
+ }
+
+ // Auto-fix any misconfigured bare repo before worktree operations
+ if (fixMisconfiguredBareRepo(projectPath)) {
+ debugLog('[TerminalWorktree] Fixed misconfigured bare repository at:', projectPath);
+ }
+
+ const worktreePath = getTerminalWorktreePath(projectPath, name);
+ const config = loadWorktreeConfig(projectPath, name);
+
+ if (!config) {
+ return { success: false, error: 'Worktree not found' };
+ }
+
+ try {
+ if (existsSync(worktreePath)) {
+ execFileSync('git', ['worktree', 'remove', '--force', worktreePath], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Removed git worktree');
+ }
+
+ if (deleteBranch && config.hasGitBranch && config.branchName) {
+ // Re-validate branch name from config file (defense in depth - config could be modified)
+ if (!GIT_BRANCH_REGEX.test(config.branchName)) {
+ debugError('[TerminalWorktree] Invalid branch name in config:', config.branchName);
+ } else {
+ try {
+ execFileSync('git', ['branch', '-D', config.branchName], {
+ cwd: projectPath,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ });
+ debugLog('[TerminalWorktree] Deleted branch:', config.branchName);
+ } catch {
+ debugLog('[TerminalWorktree] Branch not found or already deleted:', config.branchName);
+ }
+ }
+ }
+
+ // Remove metadata file
+ const metadataPath = getTerminalWorktreeMetadataPath(projectPath, name);
+ if (existsSync(metadataPath)) {
+ try {
+ rmSync(metadataPath);
+ debugLog('[TerminalWorktree] Removed metadata file:', metadataPath);
+ } catch {
+ debugLog('[TerminalWorktree] Could not remove metadata file:', metadataPath);
+ }
+ }
+
+ return { success: true };
+ } catch (error) {
+ debugError('[TerminalWorktree] Error removing worktree:', error);
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to remove worktree',
+ };
+ }
+}
+
+export function registerTerminalWorktreeHandlers(): void {
+ ipcMain.handle(
+ IPC_CHANNELS.TERMINAL_WORKTREE_CREATE,
+ async (_, request: CreateTerminalWorktreeRequest): Promise<TerminalWorktreeResult> => {
+ return createTerminalWorktree(request);
+ }
+ );
+
+ ipcMain.handle(
+ IPC_CHANNELS.TERMINAL_WORKTREE_LIST,
+ async (_, projectPath: string): Promise<IPCResult<TerminalWorktreeConfig[]>> => {
+ try {
+ const configs = await listTerminalWorktrees(projectPath);
+ return { success: true, data: configs };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to list worktrees',
+ };
+ }
+ }
+ );
+
+ ipcMain.handle(
+ IPC_CHANNELS.TERMINAL_WORKTREE_REMOVE,
+ async (
+ _,
+ projectPath: string,
+ name: string,
+ deleteBranch: boolean
+ ): Promise<TerminalWorktreeResult> => {
+ return removeTerminalWorktree(projectPath, name, deleteBranch);
+ }
+ );
+}
diff --git a/apps/frontend/src/main/memory-env-builder.ts b/apps/frontend/src/main/memory-env-builder.ts
index 804c952600..6382757d73 100644
--- a/apps/frontend/src/main/memory-env-builder.ts
+++ b/apps/frontend/src/main/memory-env-builder.ts
@@ -8,6 +8,7 @@
*/
import type { AppSettings } from '../shared/types/settings';
+import { getMemoriesDir } from './config-paths';
/**
* Build environment variables for memory/Graphiti configuration from app settings.
@@ -26,6 +27,10 @@ export function buildMemoryEnvVars(settings: AppSettings): Record<string, string> {
path.resolve(__dirname, '..', '..', '..', 'backend', 'query_memory.py'),
path.resolve(app.getAppPath(), '..', 'backend', 'query_memory.py'),
@@ -112,6 +114,68 @@ function getQueryScriptPath(): string | null {
return null;
}
+/**
+ * Get the backend venv Python path.
+ * The backend venv has real_ladybug installed (required for memory operations).
+ * Falls back to getConfiguredPythonPath() for packaged apps.
+ */
+function getBackendPythonPath(): string {
+ // For packaged apps, use the bundled Python which has real_ladybug in site-packages
+ if (app.isPackaged) {
+ const fallbackPython = getConfiguredPythonPath();
+ console.log(`[MemoryService] Using bundled Python for packaged app: ${fallbackPython}`);
+ return fallbackPython;
+ }
+
+ // Development mode: Find the backend venv which has real_ladybug installed
+ const possibleBackendPaths = [
+ path.resolve(__dirname, '..', '..', '..', 'backend'),
+ path.resolve(app.getAppPath(), '..', 'backend'),
+ path.resolve(process.cwd(), 'apps', 'backend')
+ ];
+
+ for (const backendPath of possibleBackendPaths) {
+ // Check for backend venv Python (has real_ladybug installed)
+ const venvPython = process.platform === 'win32'
+ ? path.join(backendPath, '.venv', 'Scripts', 'python.exe')
+ : path.join(backendPath, '.venv', 'bin', 'python');
+
+ if (fs.existsSync(venvPython)) {
+ console.log(`[MemoryService] Using backend venv Python: ${venvPython}`);
+ return venvPython;
+ }
+ }
+
+ // Fall back to configured Python path
+ const fallbackPython = getConfiguredPythonPath();
+ console.log(`[MemoryService] Backend venv not found, falling back to: ${fallbackPython}`);
+ return fallbackPython;
+}
+
+/**
+ * Get the Python environment variables for memory queries.
+ * This ensures real_ladybug can be found in both dev and packaged modes.
+ */
+function getMemoryPythonEnv(): Record<string, string> {
+ // Start with the standard Python environment from the manager
+ const baseEnv = pythonEnvManager.getPythonEnv();
+
+ // For packaged apps, ensure PYTHONPATH includes bundled site-packages
+ // even if the manager hasn't been fully initialized
+ if (app.isPackaged) {
+ const bundledSitePackages = path.join(process.resourcesPath, 'python-site-packages');
+ if (fs.existsSync(bundledSitePackages)) {
+ // Merge paths: bundled site-packages takes precedence
+ const existingPath = baseEnv.PYTHONPATH || '';
+ baseEnv.PYTHONPATH = existingPath
+ ? `${bundledSitePackages}${path.delimiter}${existingPath}`
+ : bundledSitePackages;
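+ // Illustrative (path assumed): PYTHONPATH ends up as e.g.
+ // "<resources>/python-site-packages" + path.delimiter + "<existing PYTHONPATH>", so real_ladybug resolves first.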
+ }
+ }
+
+ return baseEnv;
+}
+
/**
* Execute a Python memory query command
*/
@@ -120,7 +184,10 @@ async function executeQuery(
args: string[],
timeout: number = 10000
): Promise {
- const pythonCmd = getConfiguredPythonPath();
+ // Use getBackendPythonPath() to find the correct Python:
+ // - In dev mode: uses backend venv with real_ladybug installed
+ // - In packaged app: falls back to bundled Python
+ const pythonCmd = getBackendPythonPath();
const scriptPath = getQueryScriptPath();
if (!scriptPath) {
@@ -131,9 +198,16 @@ async function executeQuery(
return new Promise((resolve) => {
const fullArgs = [...baseArgs, scriptPath, command, ...args];
+
+ // Get Python environment (includes PYTHONPATH for bundled/venv packages)
+ // This is critical for finding real_ladybug (LadybugDB)
+ const pythonEnv = getMemoryPythonEnv();
+
const proc = spawn(pythonExe, fullArgs, {
stdio: ['ignore', 'pipe', 'pipe'],
timeout,
+ // Use pythonEnv which combines sanitized env + site-packages for real_ladybug
+ env: pythonEnv,
});
let stdout = '';
@@ -148,19 +222,29 @@ async function executeQuery(
});
proc.on('close', (code) => {
- if (code === 0 && stdout) {
+ // The Python script outputs JSON to stdout (even for errors)
+ // Always try to parse stdout first to get the actual error message
+ if (stdout) {
try {
const result = JSON.parse(stdout);
resolve(result);
+ return;
} catch {
+ // JSON parsing failed
+ if (code !== 0) {
+ const errorMsg = stderr || stdout || `Process exited with code ${code}`;
+ console.error('[MemoryService] Python error:', errorMsg);
+ resolve({ success: false, error: errorMsg });
+ return;
+ }
resolve({ success: false, error: `Invalid JSON response: ${stdout}` });
+ return;
}
- } else {
- resolve({
- success: false,
- error: stderr || `Process exited with code ${code}`,
- });
}
+ // No stdout - use stderr or generic error
+ const errorMsg = stderr || `Process exited with code ${code}`;
+ console.error('[MemoryService] Python error (no stdout):', errorMsg);
+ resolve({ success: false, error: errorMsg });
});
proc.on('error', (err) => {
@@ -183,7 +267,10 @@ async function executeSemanticQuery(
embedderConfig: EmbedderConfig,
timeout: number = 30000 // Longer timeout for embedding operations
): Promise {
- const pythonCmd = getConfiguredPythonPath();
+ // Use getBackendPythonPath() to find the correct Python:
+ // - In dev mode: uses backend venv with real_ladybug installed
+ // - In packaged app: falls back to bundled Python
+ const pythonCmd = getBackendPythonPath();
const scriptPath = getQueryScriptPath();
if (!scriptPath) {
@@ -192,8 +279,13 @@ async function executeSemanticQuery(
const [pythonExe, baseArgs] = parsePythonCommand(pythonCmd);
+ // Get Python environment (includes PYTHONPATH for bundled/venv packages)
+ // This is critical for finding real_ladybug (LadybugDB)
+ const pythonEnv = getMemoryPythonEnv();
+
// Build environment with embedder configuration
- const env: Record<string, string> = { ...process.env };
+ // Use pythonEnv which combines sanitized env + site-packages for real_ladybug
+ const env: Record<string, string> = { ...pythonEnv };
// Set the embedder provider
env.GRAPHITI_EMBEDDER_PROVIDER = embedderConfig.provider;
@@ -272,19 +364,26 @@ async function executeSemanticQuery(
});
proc.on('close', (code) => {
- if (code === 0 && stdout) {
+ // The Python script outputs JSON to stdout (even for errors)
+ if (stdout) {
try {
const result = JSON.parse(stdout);
resolve(result);
+ return;
} catch {
+ if (code !== 0) {
+ const errorMsg = stderr || stdout || `Process exited with code ${code}`;
+ console.error('[MemoryService] Semantic search error:', errorMsg);
+ resolve({ success: false, error: errorMsg });
+ return;
+ }
resolve({ success: false, error: `Invalid JSON response: ${stdout}` });
+ return;
}
- } else {
- resolve({
- success: false,
- error: stderr || `Process exited with code ${code}`,
- });
}
+ const errorMsg = stderr || `Process exited with code ${code}`;
+ console.error('[MemoryService] Semantic search error (no stdout):', errorMsg);
+ resolve({ success: false, error: errorMsg });
});
proc.on('error', (err) => {
@@ -526,6 +625,50 @@ export class MemoryService {
};
}
+ /**
+ * Add an episode to the memory database
+ *
+ * This allows the Electron app to save memories (like PR review insights)
+ * directly to LadybugDB without going through the full Graphiti system.
+ *
+ * @param name Episode name/title
+ * @param content Episode content (will be JSON stringified if object)
+ * @param episodeType Type of episode (session_insight, pattern, gotcha, task_outcome, pr_review)
+ * @param groupId Optional group ID for namespacing
+ * @returns Promise with the created episode info
+ */
+ async addEpisode(
+ name: string,
+ content: string | object,
+ episodeType: string = 'session_insight',
+ groupId?: string
+ ): Promise<{ success: boolean; id?: string; error?: string }> {
+ // Stringify content if it's an object
+ const contentStr = typeof content === 'object' ? JSON.stringify(content) : content;
+
+ const args = [
+ this.config.dbPath,
+ this.config.database,
+ '--name', name,
+ '--content', contentStr,
+ '--type', episodeType,
+ ];
+
+ if (groupId) {
+ args.push('--group-id', groupId);
+ }
+
+ const result = await executeQuery('add-episode', args);
+
+ if (!result.success) {
+ console.error('Failed to add episode:', result.error);
+ return { success: false, error: result.error };
+ }
+
+ const data = result.data as { id: string; name: string; type: string; timestamp: string };
+ return { success: true, id: data.id };
+ }
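+
+ // Usage sketch (hypothetical caller, not part of this change):
+ //   const res = await memoryService.addEpisode(
+ //     'PR review insight',
+ //     { summary: 'Watch for stale task caches after worktree merges' },
+ //     'pr_review'
+ //   );
+ //   if (!res.success) console.error(res.error);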
+
/**
* Close the database connection (no-op for subprocess model)
*/
diff --git a/apps/frontend/src/main/project-store.ts b/apps/frontend/src/main/project-store.ts
index 5d627c0160..5290b1f301 100644
--- a/apps/frontend/src/main/project-store.ts
+++ b/apps/frontend/src/main/project-store.ts
@@ -5,6 +5,7 @@ import { v4 as uuidv4 } from 'uuid';
import type { Project, ProjectSettings, Task, TaskStatus, TaskMetadata, ImplementationPlan, ReviewReason, PlanSubtask } from '../shared/types';
import { DEFAULT_PROJECT_SETTINGS, AUTO_BUILD_PATHS, getSpecsDir } from '../shared/constants';
import { getAutoBuildPath, isInitialized } from './project-initializer';
+import { getTaskWorktreeDir } from './worktree-paths';
interface TabState {
openProjectIds: string[];
@@ -18,12 +19,19 @@ interface StoreData {
tabState?: TabState;
}
+interface TasksCacheEntry {
+ tasks: Task[];
+ timestamp: number;
+}
+
/**
* Persistent storage for projects and settings
*/
export class ProjectStore {
private storePath: string;
private data: StoreData;
+ private tasksCache: Map<string, TasksCacheEntry> = new Map();
+ private readonly CACHE_TTL_MS = 3000; // 3 seconds TTL for task cache
constructor() {
// Store in app's userData directory
@@ -235,15 +243,29 @@ export class ProjectStore {
/**
* Get tasks for a project by scanning specs directory
+ * Implements caching with 3-second TTL to prevent excessive worktree scanning
*/
getTasks(projectId: string): Task[] {
- console.warn('[ProjectStore] getTasks called with projectId:', projectId);
+ // Check cache first
+ const cached = this.tasksCache.get(projectId);
+ const now = Date.now();
+
+ if (cached && (now - cached.timestamp) < this.CACHE_TTL_MS) {
+ console.debug('[ProjectStore] Returning cached tasks for project:', projectId, '(age:', now - cached.timestamp, 'ms)');
+ return cached.tasks;
+ }
+
+ console.warn('[ProjectStore] getTasks called - will load from disk', {
+ projectId,
+ reason: cached ? 'cache expired' : 'cache miss',
+ cacheAge: cached ? now - cached.timestamp : 'N/A'
+ });
const project = this.getProject(projectId);
if (!project) {
console.warn('[ProjectStore] Project not found for id:', projectId);
return [];
}
- console.warn('[ProjectStore] Found project:', project.name, 'autoBuildPath:', project.autoBuildPath);
+ console.warn('[ProjectStore] Found project:', project.name, 'autoBuildPath:', project.autoBuildPath, 'path:', project.path);
const allTasks: Task[] = [];
const specsBaseDir = getSpecsDir(project.autoBuildPath);
@@ -263,8 +285,7 @@ export class ProjectStore {
// 2. Scan worktree specs directories
// NOTE FOR MAINTAINERS: Worktree tasks are only included if the spec also exists in main.
// This prevents deleted tasks from "coming back" when the worktree isn't cleaned up.
- // Alternative behavior: include all worktree tasks (remove the mainSpecIds check below).
- const worktreesDir = path.join(project.path, '.worktrees');
+ const worktreesDir = getTaskWorktreeDir(project.path);
if (existsSync(worktreesDir)) {
try {
const worktrees = readdirSync(worktreesDir, { withFileTypes: true });
@@ -302,10 +323,36 @@ export class ProjectStore {
}
const tasks = Array.from(taskMap.values());
- console.warn('[ProjectStore] Returning', tasks.length, 'unique tasks (after deduplication)');
+ console.warn('[ProjectStore] Scan complete - found', tasks.length, 'unique tasks', {
+ mainTasks: allTasks.filter(t => t.location === 'main').length,
+ worktreeTasks: allTasks.filter(t => t.location === 'worktree').length,
+ deduplicated: allTasks.length - tasks.length
+ });
+
+ // Update cache
+ this.tasksCache.set(projectId, { tasks, timestamp: now });
+
return tasks;
}
+ /**
+ * Invalidate the tasks cache for a specific project
+ * Call this when tasks are modified (created, deleted, status changed, etc.)
+ */
+ invalidateTasksCache(projectId: string): void {
+ this.tasksCache.delete(projectId);
+ console.debug('[ProjectStore] Invalidated tasks cache for project:', projectId);
+ }
+
+ /**
+ * Clear all tasks cache entries
+ * Useful for global refresh scenarios
+ */
+ clearTasksCache(): void {
+ this.tasksCache.clear();
+ console.debug('[ProjectStore] Cleared all tasks cache');
+ }
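+
+ // Hypothetical call sites (not part of this diff): call invalidateTasksCache(projectId)
+ // after a task's implementation_plan.json is written or deleted, so the next
+ // getTasks() call rescans the specs directories instead of serving a stale cached list.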
+
/**
* Load tasks from a specs directory (helper method for main project and worktrees)
*/
@@ -338,12 +385,22 @@ export class ProjectStore {
// Try to read implementation plan
let plan: ImplementationPlan | null = null;
if (existsSync(planPath)) {
+ console.warn(`[ProjectStore] Loading implementation_plan.json for spec: ${dir.name} from ${location}`);
try {
const content = readFileSync(planPath, 'utf-8');
plan = JSON.parse(content);
- } catch {
- // Ignore parse errors
+ console.warn(`[ProjectStore] Loaded plan for ${dir.name}:`, {
+ hasDescription: !!plan?.description,
+ hasFeature: !!plan?.feature,
+ status: plan?.status,
+ phaseCount: plan?.phases?.length || 0,
+ subtaskCount: plan?.phases?.flatMap(p => p.subtasks || []).length || 0
+ });
+ } catch (err) {
+ console.error(`[ProjectStore] Failed to parse implementation_plan.json for ${dir.name}:`, err);
}
+ } else {
+ console.warn(`[ProjectStore] No implementation_plan.json found for spec: ${dir.name} at ${planPath}`);
}
// PRIORITY 1: Read description from implementation_plan.json (user's original)
@@ -360,27 +417,8 @@ export class ProjectStore {
const reqContent = readFileSync(requirementsPath, 'utf-8');
const requirements = JSON.parse(reqContent);
if (requirements.task_description) {
- // Extract a clean summary from task_description (first line or first ~200 chars)
- const taskDesc = requirements.task_description;
- const firstLine = taskDesc.split('\n')[0].trim();
- // If the first line is a title like "Investigate GitHub Issue #36", use the next meaningful line
- if (firstLine.toLowerCase().startsWith('investigate') && taskDesc.includes('\n\n')) {
- const sections = taskDesc.split('\n\n');
- // Find the first paragraph that's not a title
- for (const section of sections) {
- const trimmed = section.trim();
- // Skip headers and short lines
- if (trimmed.startsWith('#') || trimmed.length < 20) continue;
- // Skip the "Please analyze" instruction at the end
- if (trimmed.startsWith('Please analyze')) continue;
- description = trimmed.substring(0, 200).split('\n')[0];
- break;
- }
- }
- // If still no description, use a shortened version of task_description
- if (!description) {
- description = firstLine.substring(0, 150);
- }
+ // Use the full task description for the modal view
+ description = requirements.task_description;
}
} catch {
// Ignore parse errors
@@ -544,6 +582,7 @@ export class ProjectStore {
'done': 'done',
'human_review': 'human_review',
'ai_review': 'ai_review',
+ 'pr_created': 'pr_created', // PR has been created for this task
'backlog': 'backlog'
};
const storedStatus = statusMap[plan.status];
@@ -553,6 +592,11 @@ export class ProjectStore {
return { status: 'done' };
}
+ // If task has a PR created, always respect that status
+ if (storedStatus === 'pr_created') {
+ return { status: 'pr_created' };
+ }
+
// For other stored statuses, validate against calculated status
if (storedStatus) {
// Planning/coding status from the backend should be respected even if subtasks aren't in progress yet
@@ -563,11 +607,16 @@ export class ProjectStore {
// planStatus: "review" indicates spec creation is complete and awaiting user approval
const isPlanReviewStage = (plan as unknown as { planStatus?: string })?.planStatus === 'review';
+ // Determine if there is remaining work to do
+ // True if: no subtasks exist yet (planning in progress) OR some subtasks are incomplete
+ // This prevents 'in_progress' from overriding 'human_review' when all work is done
+ const hasRemainingWork = allSubtasks.length === 0 || allSubtasks.some((s) => s.status !== 'completed');
+
const isStoredStatusValid =
(storedStatus === calculatedStatus) || // Matches calculated
- (storedStatus === 'human_review' && calculatedStatus === 'ai_review') || // Human review is more advanced than ai_review
+ (storedStatus === 'human_review' && (calculatedStatus === 'ai_review' || calculatedStatus === 'in_progress')) || // Human review is more advanced than ai_review or in_progress (fixes status loop bug)
(storedStatus === 'human_review' && isPlanReviewStage) || // Plan review stage (awaiting spec approval)
- (isActiveProcessStatus && storedStatus === 'in_progress'); // Planning/coding phases should show as in_progress
+ (isActiveProcessStatus && storedStatus === 'in_progress' && hasRemainingWork); // Planning/coding phases should show as in_progress ONLY when there's remaining work
if (isStoredStatusValid) {
// Preserve reviewReason for human_review status
@@ -643,7 +692,7 @@ export class ProjectStore {
}
// 2. Check worktrees
- const worktreesDir = path.join(projectPath, '.worktrees');
+ const worktreesDir = getTaskWorktreeDir(projectPath);
if (existsSync(worktreesDir)) {
try {
const worktrees = readdirSync(worktreesDir, { withFileTypes: true });
@@ -721,6 +770,9 @@ export class ProjectStore {
}
}
+ // Invalidate cache since task metadata changed
+ this.invalidateTasksCache(projectId);
+
return !hasErrors;
}
@@ -777,6 +829,9 @@ export class ProjectStore {
}
}
+ // Invalidate cache since task metadata changed
+ this.invalidateTasksCache(projectId);
+
return !hasErrors;
}
}
diff --git a/apps/frontend/src/main/python-env-manager.ts b/apps/frontend/src/main/python-env-manager.ts
index 608ba5fda5..fc9a0c90e9 100644
--- a/apps/frontend/src/main/python-env-manager.ts
+++ b/apps/frontend/src/main/python-env-manager.ts
@@ -122,19 +122,36 @@ export class PythonEnvManager extends EventEmitter {
return false;
}
- // Check for the marker file that indicates successful bundling
- const markerPath = path.join(sitePackagesPath, '.bundled');
- if (existsSync(markerPath)) {
- console.log(`[PythonEnvManager] Found bundle marker, using bundled packages`);
- return true;
+ // Critical packages that must exist for proper functionality
+ // This fixes GitHub issue #416 where marker exists but packages are missing
+ // Note: Same list exists in download-python.cjs - keep them in sync
+ // This validation assumes traditional Python packages with __init__.py (not PEP 420 namespace packages)
+ const criticalPackages = ['claude_agent_sdk', 'dotenv', 'pydantic_core'];
+
+ // Check each package exists with valid structure (directory + __init__.py)
+ const missingPackages = criticalPackages.filter((pkg) => {
+ const pkgPath = path.join(sitePackagesPath, pkg);
+ const initPath = path.join(pkgPath, '__init__.py');
+ // Package is valid if directory and __init__.py both exist
+ return !existsSync(pkgPath) || !existsSync(initPath);
+ });
+
+ // Log missing packages for debugging
+ for (const pkg of missingPackages) {
+ console.log(
+ `[PythonEnvManager] Missing critical package: ${pkg} at ${path.join(sitePackagesPath, pkg)}`
+ );
}
- // Fallback: check if key packages exist
- // This handles cases where the marker might be missing but packages are there
- const claudeSdkPath = path.join(sitePackagesPath, 'claude_agent_sdk');
- const dotenvPath = path.join(sitePackagesPath, 'dotenv');
- if (existsSync(claudeSdkPath) || existsSync(dotenvPath)) {
- console.log(`[PythonEnvManager] Found key packages, using bundled packages`);
+ // All packages must exist - don't rely solely on marker file
+ if (missingPackages.length === 0) {
+ // Also check marker for logging purposes
+ const markerPath = path.join(sitePackagesPath, '.bundled');
+ if (existsSync(markerPath)) {
+ console.log(`[PythonEnvManager] Found bundle marker and all critical packages`);
+ } else {
+ console.log(`[PythonEnvManager] Found critical packages (marker missing)`);
+ }
return true;
}
@@ -619,23 +636,40 @@ if sys.version_info >= (3, 12):
/**
* Get environment variables that should be set when spawning Python processes.
* This ensures Python finds the bundled packages or venv packages.
+ *
+ * IMPORTANT: This returns a COMPLETE environment (based on process.env) with
+ * problematic Python variables removed. This fixes the "Could not find platform
+ * independent libraries <prefix>" error on Windows when PYTHONHOME is set.
+ *
+ * @see https://github.com/AndyMik90/Auto-Claude/issues/176
*/
getPythonEnv(): Record<string, string> {
- const env: Record = {
+ // Start with process.env but explicitly remove problematic Python variables
+ // PYTHONHOME causes "Could not find platform independent libraries" when set
+ // to a different Python installation than the one we're spawning
+ const baseEnv: Record<string, string> = {};
+
+ for (const [key, value] of Object.entries(process.env)) {
+ // Skip PYTHONHOME - it causes the "platform independent libraries" error
+ // Use case-insensitive check for Windows compatibility (env vars are case-insensitive on Windows)
+ // Skip undefined values (TypeScript type guard)
+ if (key.toUpperCase() !== 'PYTHONHOME' && value !== undefined) {
+ baseEnv[key] = value;
+ }
+ }
+
+ // Apply our Python configuration on top
+ return {
+ ...baseEnv,
// Don't write bytecode - not needed and avoids permission issues
PYTHONDONTWRITEBYTECODE: '1',
// Use UTF-8 encoding
PYTHONIOENCODING: 'utf-8',
// Disable user site-packages to avoid conflicts
PYTHONNOUSERSITE: '1',
+ // Override PYTHONPATH if we have bundled packages
+ ...(this.sitePackagesPath ? { PYTHONPATH: this.sitePackagesPath } : {}),
};
-
- // Set PYTHONPATH to our site-packages
- if (this.sitePackagesPath) {
- env.PYTHONPATH = this.sitePackagesPath;
- }
-
- return env;
}
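+
+ // Example (illustrative): spawning a Python script with the sanitized env
+ //   const env = pythonEnvManager.getPythonEnv(); // PYTHONHOME removed, PYTHONPATH set
+ //   spawn(pythonExe, [scriptPath], { env });     // pythonExe/scriptPath are placeholders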
/**
diff --git a/apps/frontend/src/main/release-service.ts b/apps/frontend/src/main/release-service.ts
index ed7367d5db..b05152256d 100644
--- a/apps/frontend/src/main/release-service.ts
+++ b/apps/frontend/src/main/release-service.ts
@@ -344,16 +344,12 @@ export class ReleaseService extends EventEmitter {
tasks: Task[]
): Promise<UnmergedWorktreeInfo[]> {
const unmerged: UnmergedWorktreeInfo[] = [];
-
- // Get worktrees directory
- const worktreesDir = path.join(projectPath, '.worktrees', 'auto-claude');
+ const worktreesDir = path.join(projectPath, '.auto-claude', 'worktrees', 'tasks');
if (!existsSync(worktreesDir)) {
- // No worktrees exist at all - all clear
return [];
}
- // List all spec worktrees
let worktreeFolders: string[];
try {
worktreeFolders = readdirSync(worktreesDir, { withFileTypes: true })
@@ -366,17 +362,16 @@ export class ReleaseService extends EventEmitter {
// Check each spec ID that's in this release
for (const specId of releaseSpecIds) {
// Find the worktree folder for this spec
- // Spec IDs are like "001-feature-name", worktree folders match
- const worktreeFolder = worktreeFolders.find(folder =>
+ const matchingFolder = worktreeFolders.find(folder =>
folder === specId || folder.startsWith(`${specId}-`)
);
- if (!worktreeFolder) {
+ if (!matchingFolder) {
// No worktree for this spec - it's already merged/cleaned up
continue;
}
- const worktreePath = path.join(worktreesDir, worktreeFolder);
+ const worktreePath = path.join(worktreesDir, matchingFolder);
// Get the task info for better error messages
const task = tasks.find(t => t.specId === specId);
diff --git a/apps/frontend/src/main/sentry.ts b/apps/frontend/src/main/sentry.ts
new file mode 100644
index 0000000000..0ab4e6602a
--- /dev/null
+++ b/apps/frontend/src/main/sentry.ts
@@ -0,0 +1,167 @@
+/**
+ * Sentry Error Tracking for Main Process
+ *
+ * Initializes Sentry with:
+ * - beforeSend hook for mid-session toggle support (no restart needed)
+ * - Path masking for user privacy (shared with renderer)
+ * - IPC listener for settings changes from renderer
+ *
+ * Privacy Note:
+ * - Usernames are masked from all file paths
+ * - Project paths remain visible for debugging (this is expected)
+ * - Tags, contexts, extra data, and user info are all sanitized
+ */
+
+import * as Sentry from '@sentry/electron/main';
+import { app, ipcMain } from 'electron';
+import { readSettingsFile } from './settings-utils';
+import { DEFAULT_APP_SETTINGS } from '../shared/constants';
+import { IPC_CHANNELS } from '../shared/constants/ipc';
+import {
+ processEvent,
+ PRODUCTION_TRACE_SAMPLE_RATE,
+ type SentryErrorEvent
+} from '../shared/utils/sentry-privacy';
+
+// In-memory state for current setting (updated via IPC when user toggles)
+let sentryEnabledState = true;
+
+/**
+ * Get Sentry DSN from environment variable
+ *
+ * For local development/testing:
+ * - Add SENTRY_DSN to your .env file, or
+ * - Run: SENTRY_DSN=your-dsn npm start
+ *
+ * For CI/CD releases:
+ * - Set SENTRY_DSN as a GitHub Actions secret
+ *
+ * For forks:
+ * - Without SENTRY_DSN, Sentry is disabled (safe for forks)
+ */
+function getSentryDsn(): string {
+ return process.env.SENTRY_DSN || '';
+}
+
+/**
+ * Get trace sample rate from environment variable
+ * Controls performance monitoring sampling (0.0 to 1.0)
+ * Default: 0.1 (10%) in production, 0 in development
+ */
+function getTracesSampleRate(): number {
+ const envValue = process.env.SENTRY_TRACES_SAMPLE_RATE;
+ if (envValue !== undefined) {
+ const parsed = parseFloat(envValue);
+ if (!isNaN(parsed) && parsed >= 0 && parsed <= 1) {
+ return parsed;
+ }
+ }
+ // Default: 10% in production, 0 in dev
+ return app.isPackaged ? PRODUCTION_TRACE_SAMPLE_RATE : 0;
+}
+
+/**
+ * Get profile sample rate from environment variable
+ * Controls profiling sampling relative to traces (0.0 to 1.0)
+ * Default: 0.1 (10%) in production, 0 in development
+ */
+function getProfilesSampleRate(): number {
+ const envValue = process.env.SENTRY_PROFILES_SAMPLE_RATE;
+ if (envValue !== undefined) {
+ const parsed = parseFloat(envValue);
+ if (!isNaN(parsed) && parsed >= 0 && parsed <= 1) {
+ return parsed;
+ }
+ }
+ // Default: 10% in production, 0 in dev
+ return app.isPackaged ? PRODUCTION_TRACE_SAMPLE_RATE : 0;
+}
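+
+// Example for local testing (placeholder values, not real credentials):
+//   SENTRY_DSN=https://<key>@<org>.ingest.sentry.io/<project> \
+//   SENTRY_TRACES_SAMPLE_RATE=0.2 SENTRY_DEV=true npm start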
+
+// Cache config so renderer can access it via IPC
+let cachedDsn: string = '';
+let cachedTracesSampleRate: number = 0;
+let cachedProfilesSampleRate: number = 0;
+
+/**
+ * Initialize Sentry for the main process
+ * Called early in app startup, before window creation
+ */
+export function initSentryMain(): void {
+ // Get configuration from environment variables
+ cachedDsn = getSentryDsn();
+ cachedTracesSampleRate = getTracesSampleRate();
+ cachedProfilesSampleRate = getProfilesSampleRate();
+
+ // Read initial setting from disk synchronously
+ const savedSettings = readSettingsFile();
+ const settings = { ...DEFAULT_APP_SETTINGS, ...savedSettings };
+ sentryEnabledState = settings.sentryEnabled ?? true;
+
+ // Check if we have a DSN - if not, Sentry is effectively disabled
+ const hasDsn = cachedDsn.length > 0;
+ const shouldEnable = hasDsn && (app.isPackaged || process.env.SENTRY_DEV === 'true');
+
+ if (!hasDsn) {
+ console.log('[Sentry] No SENTRY_DSN configured - error reporting disabled');
+ console.log('[Sentry] To enable: set SENTRY_DSN environment variable');
+ }
+
+ Sentry.init({
+ dsn: cachedDsn,
+ environment: app.isPackaged ? 'production' : 'development',
+ release: `auto-claude@${app.getVersion()}`,
+
+ beforeSend(event: Sentry.ErrorEvent) {
+ if (!sentryEnabledState) {
+ return null;
+ }
+ // Process event with shared privacy utility
+ return processEvent(event as SentryErrorEvent) as Sentry.ErrorEvent;
+ },
+
+ // Sample rates from environment variables (default: 10% in production, 0 in dev)
+ tracesSampleRate: cachedTracesSampleRate,
+ profilesSampleRate: cachedProfilesSampleRate,
+
+ // Only enable if we have a DSN and are in production (or SENTRY_DEV is set)
+ enabled: shouldEnable,
+ });
+
+ // Listen for settings changes from renderer process
+ ipcMain.on(IPC_CHANNELS.SENTRY_STATE_CHANGED, (_event, enabled: boolean) => {
+ sentryEnabledState = enabled;
+ console.log(`[Sentry] Error reporting ${enabled ? 'enabled' : 'disabled'} (via IPC)`);
+ });
+
+ // IPC handler for renderer to get Sentry config
+ ipcMain.handle(IPC_CHANNELS.GET_SENTRY_DSN, () => {
+ return cachedDsn;
+ });
+
+ ipcMain.handle(IPC_CHANNELS.GET_SENTRY_CONFIG, () => {
+ return {
+ dsn: cachedDsn,
+ tracesSampleRate: cachedTracesSampleRate,
+ profilesSampleRate: cachedProfilesSampleRate,
+ };
+ });
+
+ if (hasDsn) {
+ console.log(`[Sentry] Main process initialized (enabled: ${sentryEnabledState}, traces: ${cachedTracesSampleRate}, profiles: ${cachedProfilesSampleRate})`);
+ }
+}
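+
+// Hypothetical renderer-side toggle (not part of this file): the renderer can flip
+// reporting mid-session without a restart via
+//   ipcRenderer.send(IPC_CHANNELS.SENTRY_STATE_CHANGED, enabled);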
+
+/**
+ * Get current Sentry enabled state
+ */
+export function isSentryEnabled(): boolean {
+ return sentryEnabledState;
+}
+
+/**
+ * Set Sentry enabled state programmatically
+ */
+export function setSentryEnabled(enabled: boolean): void {
+ sentryEnabledState = enabled;
+ console.log(`[Sentry] Error reporting ${enabled ? 'enabled' : 'disabled'} (programmatic)`);
+}
diff --git a/apps/frontend/src/main/services/profile-service.test.ts b/apps/frontend/src/main/services/profile-service.test.ts
new file mode 100644
index 0000000000..028e7c9bdf
--- /dev/null
+++ b/apps/frontend/src/main/services/profile-service.test.ts
@@ -0,0 +1,1031 @@
+/**
+ * Tests for profile-service.ts
+ *
+ * Red phase - write failing tests first
+ */
+
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import {
+ validateBaseUrl,
+ validateApiKey,
+ validateProfileNameUnique,
+ createProfile,
+ updateProfile,
+ getAPIProfileEnv,
+ testConnection
+} from './profile-service';
+import type { APIProfile, ProfilesFile, TestConnectionResult } from '../../shared/types/profile';
+
+// Mock profile-manager
+vi.mock('../utils/profile-manager', () => ({
+ loadProfilesFile: vi.fn(),
+ saveProfilesFile: vi.fn(),
+ generateProfileId: vi.fn(() => 'mock-uuid-1234')
+}));
+
+describe('profile-service', () => {
+ describe('validateBaseUrl', () => {
+ it('should accept valid HTTPS URLs', () => {
+ expect(validateBaseUrl('https://api.anthropic.com')).toBe(true);
+ expect(validateBaseUrl('https://custom-api.example.com')).toBe(true);
+ expect(validateBaseUrl('https://api.example.com/v1')).toBe(true);
+ });
+
+ it('should accept valid HTTP URLs', () => {
+ expect(validateBaseUrl('http://localhost:8080')).toBe(true);
+ expect(validateBaseUrl('http://127.0.0.1:8000')).toBe(true);
+ });
+
+ it('should reject invalid URLs', () => {
+ expect(validateBaseUrl('not-a-url')).toBe(false);
+ expect(validateBaseUrl('ftp://example.com')).toBe(false);
+ expect(validateBaseUrl('')).toBe(false);
+ expect(validateBaseUrl('https://')).toBe(false);
+ });
+
+ it('should reject URLs without valid format', () => {
+ expect(validateBaseUrl('anthropic.com')).toBe(false);
+ expect(validateBaseUrl('://api.anthropic.com')).toBe(false);
+ });
+ });
+
+ describe('validateApiKey', () => {
+ it('should accept Anthropic API key format (sk-ant-...)', () => {
+ expect(validateApiKey('sk-ant-api03-12345')).toBe(true);
+ expect(validateApiKey('sk-ant-test-key')).toBe(true);
+ });
+
+ it('should accept OpenAI API key format (sk-...)', () => {
+ expect(validateApiKey('sk-proj-12345')).toBe(true);
+ expect(validateApiKey('sk-test-key-12345')).toBe(true);
+ });
+
+ it('should accept custom API keys with reasonable length', () => {
+ expect(validateApiKey('custom-key-12345678')).toBe(true);
+ expect(validateApiKey('x-api-key-abcdefghij')).toBe(true);
+ });
+
+ it('should reject empty or too short keys', () => {
+ expect(validateApiKey('')).toBe(false);
+ expect(validateApiKey('sk-')).toBe(false);
+ expect(validateApiKey('abc')).toBe(false);
+ });
+
+ it('should reject keys with only whitespace', () => {
+ expect(validateApiKey(' ')).toBe(false);
+ expect(validateApiKey('\t\n')).toBe(false);
+ });
+ });
+
+ describe('validateProfileNameUnique', () => {
+ it('should return true when name is unique', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: '1',
+ name: 'Existing Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await validateProfileNameUnique('New Profile');
+ expect(result).toBe(true);
+ });
+
+ it('should return false when name already exists', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: '1',
+ name: 'Existing Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await validateProfileNameUnique('Existing Profile');
+ expect(result).toBe(false);
+ });
+
+ it('should be case-insensitive for duplicate detection', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: '1',
+ name: 'My Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result1 = await validateProfileNameUnique('my profile');
+ const result2 = await validateProfileNameUnique('MY PROFILE');
+ expect(result1).toBe(false);
+ expect(result2).toBe(false);
+ });
+
+ it('should trim whitespace before checking', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: '1',
+ name: 'My Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await validateProfileNameUnique(' My Profile ');
+ expect(result).toBe(false);
+ });
+ });
+
+ describe('createProfile', () => {
+ it('should create profile with valid data and save', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile, saveProfilesFile, generateProfileId } =
+ await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+ vi.mocked(generateProfileId).mockReturnValue('generated-id-123');
+
+ const input = {
+ name: 'Test Profile',
+ baseUrl: 'https://api.anthropic.com',
+ apiKey: 'sk-ant-test-key',
+ models: {
+ default: 'claude-3-5-sonnet-20241022'
+ }
+ };
+
+ const result = await createProfile(input);
+
+ expect(result).toMatchObject({
+ id: 'generated-id-123',
+ name: 'Test Profile',
+ baseUrl: 'https://api.anthropic.com',
+ apiKey: 'sk-ant-test-key',
+ models: {
+ default: 'claude-3-5-sonnet-20241022'
+ }
+ });
+ expect(result.createdAt).toBeGreaterThan(0);
+ expect(result.updatedAt).toBeGreaterThan(0);
+ expect(saveProfilesFile).toHaveBeenCalled();
+ });
+
+ it('should throw error for invalid base URL', async () => {
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue({
+ profiles: [],
+ activeProfileId: null,
+ version: 1
+ });
+
+ const input = {
+ name: 'Test Profile',
+ baseUrl: 'not-a-url',
+ apiKey: 'sk-ant-test-key'
+ };
+
+ await expect(createProfile(input)).rejects.toThrow('Invalid base URL');
+ });
+
+ it('should throw error for invalid API key', async () => {
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue({
+ profiles: [],
+ activeProfileId: null,
+ version: 1
+ });
+
+ const input = {
+ name: 'Test Profile',
+ baseUrl: 'https://api.anthropic.com',
+ apiKey: 'too-short'
+ };
+
+ await expect(createProfile(input)).rejects.toThrow('Invalid API key');
+ });
+
+ it('should throw error for duplicate profile name', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: '1',
+ name: 'Existing Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const input = {
+ name: 'Existing Profile',
+ baseUrl: 'https://api.anthropic.com',
+ apiKey: 'sk-ant-test-key'
+ };
+
+ await expect(createProfile(input)).rejects.toThrow(
+ 'A profile with this name already exists'
+ );
+ });
+ });
+
+ describe('updateProfile', () => {
+ it('should update profile name and other fields', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'existing-id',
+ name: 'Old Name',
+ baseUrl: 'https://old-api.example.com',
+ apiKey: 'sk-old-key-12345678',
+ createdAt: 1000000,
+ updatedAt: 1000000
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile, saveProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+
+ const input = {
+ id: 'existing-id',
+ name: 'New Name',
+ baseUrl: 'https://new-api.example.com',
+ apiKey: 'sk-new-api-key-123',
+ models: { default: 'claude-3-5-sonnet-20241022' }
+ };
+
+ const result = await updateProfile(input);
+
+ expect(result.name).toBe('New Name');
+ expect(result.baseUrl).toBe('https://new-api.example.com');
+ expect(result.apiKey).toBe('sk-new-api-key-123');
+ expect(result.models).toEqual({ default: 'claude-3-5-sonnet-20241022' });
+ expect(result.updatedAt).toBeGreaterThan(1000000); // updatedAt should be refreshed
+ expect(result.createdAt).toBe(1000000); // createdAt should remain unchanged
+ });
+
+ it('should allow updating profile with same name (case-insensitive)', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'existing-id',
+ name: 'My Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-old-api-key-123',
+ createdAt: 1000000,
+ updatedAt: 1000000
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile, saveProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+ vi.mocked(saveProfilesFile).mockResolvedValue(undefined);
+
+ const input = {
+ id: 'existing-id',
+ name: 'my profile', // Same name, different case
+ baseUrl: 'https://new-api.example.com',
+ apiKey: 'sk-new-api-key-456'
+ };
+
+ const result = await updateProfile(input);
+ expect(result.name).toBe('my profile');
+ expect(saveProfilesFile).toHaveBeenCalled();
+ });
+
+ it('should throw error when name conflicts with another profile', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Profile One',
+ baseUrl: 'https://api1.example.com',
+ apiKey: 'sk-key-one-12345678',
+ createdAt: 1000000,
+ updatedAt: 1000000
+ },
+ {
+ id: 'profile-2',
+ name: 'Profile Two',
+ baseUrl: 'https://api2.example.com',
+ apiKey: 'sk-key-two-12345678',
+ createdAt: 1000000,
+ updatedAt: 1000000
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const input = {
+ id: 'profile-1',
+ name: 'Profile Two', // Name that exists on profile-2
+ baseUrl: 'https://api1.example.com',
+ apiKey: 'sk-key-one-12345678'
+ };
+
+ await expect(updateProfile(input)).rejects.toThrow(
+ 'A profile with this name already exists'
+ );
+ });
+
+ it('should throw error for invalid base URL', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'existing-id',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-api-key-123',
+ createdAt: 1000000,
+ updatedAt: 1000000
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const input = {
+ id: 'existing-id',
+ name: 'Test Profile',
+ baseUrl: 'not-a-url',
+ apiKey: 'sk-test-api-key-123'
+ };
+
+ await expect(updateProfile(input)).rejects.toThrow('Invalid base URL');
+ });
+
+ it('should throw error for invalid API key', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'existing-id',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-api-key-123',
+ createdAt: 1000000,
+ updatedAt: 1000000
+ }
+ ],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const input = {
+ id: 'existing-id',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'too-short'
+ };
+
+ await expect(updateProfile(input)).rejects.toThrow('Invalid API key');
+ });
+
+ it('should throw error when profile not found', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [],
+ activeProfileId: null,
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const input = {
+ id: 'non-existent-id',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-api-key-123'
+ };
+
+ await expect(updateProfile(input)).rejects.toThrow('Profile not found');
+ });
+ });
+
+ describe('getAPIProfileEnv', () => {
+ it('should return empty object when no active profile (OAuth mode)', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-key-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: null, // No active profile = OAuth mode
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+ expect(result).toEqual({});
+ });
+
+ it('should return empty object when activeProfileId is empty string', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-key-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: '',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+ expect(result).toEqual({});
+ });
+
+ it('should return correct env vars for active profile with all fields', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: 'https://api.custom.com',
+ apiKey: 'sk-test-key-12345678',
+ models: {
+ default: 'claude-3-5-sonnet-20241022',
+ haiku: 'claude-3-5-haiku-20241022',
+ sonnet: 'claude-3-5-sonnet-20241022',
+ opus: 'claude-3-5-opus-20241022'
+ },
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ expect(result).toEqual({
+ ANTHROPIC_BASE_URL: 'https://api.custom.com',
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678',
+ ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022',
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: 'claude-3-5-haiku-20241022',
+ ANTHROPIC_DEFAULT_SONNET_MODEL: 'claude-3-5-sonnet-20241022',
+ ANTHROPIC_DEFAULT_OPUS_MODEL: 'claude-3-5-opus-20241022'
+ });
+ });
+
+ it('should filter out empty string values', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: '',
+ apiKey: 'sk-test-key-12345678',
+ models: {
+ default: 'claude-3-5-sonnet-20241022',
+ haiku: '',
+ sonnet: ''
+ },
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ // Empty baseUrl should be filtered out
+ expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL');
+ // Empty model values should be filtered out
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL');
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL');
+ // Non-empty values should be present
+ expect(result).toEqual({
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678',
+ ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022'
+ });
+ });
+
+ it('should handle missing models object', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-key-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ // No models property
+ }
+ ],
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ expect(result).toEqual({
+ ANTHROPIC_BASE_URL: 'https://api.example.com',
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678'
+ });
+ expect(result).not.toHaveProperty('ANTHROPIC_MODEL');
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL');
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL');
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_OPUS_MODEL');
+ });
+
+ it('should handle partial model configurations', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: 'https://api.example.com',
+ apiKey: 'sk-test-key-12345678',
+ models: {
+ default: 'claude-3-5-sonnet-20241022'
+ // Only default model set
+ },
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ expect(result).toEqual({
+ ANTHROPIC_BASE_URL: 'https://api.example.com',
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678',
+ ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022'
+ });
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_HAIKU_MODEL');
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_SONNET_MODEL');
+ expect(result).not.toHaveProperty('ANTHROPIC_DEFAULT_OPUS_MODEL');
+ });
+
+ it('should find active profile by id when multiple profiles exist', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Profile One',
+ baseUrl: 'https://api1.example.com',
+ apiKey: 'sk-key-one-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ },
+ {
+ id: 'profile-2',
+ name: 'Profile Two',
+ baseUrl: 'https://api2.example.com',
+ apiKey: 'sk-key-two-12345678',
+ models: { default: 'claude-3-5-sonnet-20241022' },
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ },
+ {
+ id: 'profile-3',
+ name: 'Profile Three',
+ baseUrl: 'https://api3.example.com',
+ apiKey: 'sk-key-three-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'profile-2',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ expect(result).toEqual({
+ ANTHROPIC_BASE_URL: 'https://api2.example.com',
+ ANTHROPIC_AUTH_TOKEN: 'sk-key-two-12345678',
+ ANTHROPIC_MODEL: 'claude-3-5-sonnet-20241022'
+ });
+ });
+
+ it('should handle profile not found (activeProfileId points to non-existent profile)', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Profile One',
+ baseUrl: 'https://api1.example.com',
+ apiKey: 'sk-key-one-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'non-existent-id', // Points to profile that doesn't exist
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ // Should return empty object gracefully
+ expect(result).toEqual({});
+ });
+
+ it('should trim whitespace from values before filtering', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: ' https://api.example.com ', // Has whitespace
+ apiKey: 'sk-test-key-12345678',
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ // Whitespace should be trimmed, not filtered out
+ expect(result).toEqual({
+ ANTHROPIC_BASE_URL: 'https://api.example.com', // Trimmed
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678'
+ });
+ });
+
+ it('should filter out whitespace-only values', async () => {
+ const mockFile: ProfilesFile = {
+ profiles: [
+ {
+ id: 'profile-1',
+ name: 'Test Profile',
+ baseUrl: ' ', // Whitespace only
+ apiKey: 'sk-test-key-12345678',
+ models: {
+ default: ' ' // Whitespace only
+ },
+ createdAt: Date.now(),
+ updatedAt: Date.now()
+ }
+ ],
+ activeProfileId: 'profile-1',
+ version: 1
+ };
+
+ const { loadProfilesFile } = await import('../utils/profile-manager');
+ vi.mocked(loadProfilesFile).mockResolvedValue(mockFile);
+
+ const result = await getAPIProfileEnv();
+
+ // Whitespace-only values should be filtered out
+ expect(result).not.toHaveProperty('ANTHROPIC_BASE_URL');
+ expect(result).not.toHaveProperty('ANTHROPIC_MODEL');
+ expect(result).toEqual({
+ ANTHROPIC_AUTH_TOKEN: 'sk-test-key-12345678'
+ });
+ });
+ });
+
+ describe('testConnection', () => {
+ beforeEach(() => {
+ // Mock fetch globally for testConnection tests
+ global.fetch = vi.fn();
+ });
+
+ it('should return success for valid credentials (200 response)', async () => {
+ vi.mocked(global.fetch).mockResolvedValue({
+ ok: true,
+ status: 200,
+ json: async () => ({ data: [] })
+ } as Response);
+
+ const result = await testConnection('https://api.anthropic.com', 'sk-ant-test-key-12');
+
+ expect(result).toEqual({
+ success: true,
+ message: 'Connection successful'
+ });
+ expect(global.fetch).toHaveBeenCalledWith(
+ 'https://api.anthropic.com/v1/models',
+ expect.objectContaining({
+ method: 'GET',
+ headers: expect.objectContaining({
+ 'x-api-key': 'sk-ant-test-key-12',
+ 'anthropic-version': '2023-06-01'
+ })
+ })
+ );
+ });
+
+ it('should return auth error for invalid API key (401 response)', async () => {
+ vi.mocked(global.fetch).mockResolvedValue({
+ ok: false,
+ status: 401,
+ statusText: 'Unauthorized'
+ } as Response);
+
+ const result = await testConnection('https://api.anthropic.com', 'sk-invalid-key-12');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'auth',
+ message: 'Authentication failed. Please check your API key.'
+ });
+ });
+
+ it('should return auth error for 403 response', async () => {
+ vi.mocked(global.fetch).mockResolvedValue({
+ ok: false,
+ status: 403,
+ statusText: 'Forbidden'
+ } as Response);
+
+ const result = await testConnection('https://api.anthropic.com', 'sk-forbidden-key');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'auth',
+ message: 'Authentication failed. Please check your API key.'
+ });
+ });
+
+ it('should return endpoint error for invalid URL (404 response)', async () => {
+ vi.mocked(global.fetch).mockResolvedValue({
+ ok: false,
+ status: 404,
+ statusText: 'Not Found'
+ } as Response);
+
+ const result = await testConnection('https://invalid.example.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'endpoint',
+ message: 'Invalid endpoint. Please check the Base URL.'
+ });
+ });
+
+ it('should return network error for connection refused', async () => {
+ const networkError = new TypeError('Failed to fetch');
+ (networkError as any).code = 'ECONNREFUSED';
+
+ vi.mocked(global.fetch).mockRejectedValue(networkError);
+
+ const result = await testConnection('https://unreachable.example.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'network',
+ message: 'Network error. Please check your internet connection.'
+ });
+ });
+
+ it('should return network error for ENOTFOUND (DNS failure)', async () => {
+ const dnsError = new TypeError('Failed to fetch');
+ (dnsError as any).code = 'ENOTFOUND';
+
+ vi.mocked(global.fetch).mockRejectedValue(dnsError);
+
+ const result = await testConnection('https://nosuchdomain.example.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'network',
+ message: 'Network error. Please check your internet connection.'
+ });
+ });
+
+ it('should return timeout error for AbortError', async () => {
+ const abortError = new Error('Aborted');
+ abortError.name = 'AbortError';
+
+ vi.mocked(global.fetch).mockRejectedValue(abortError);
+
+ const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'timeout',
+ message: 'Connection timeout. The endpoint did not respond.'
+ });
+ });
+
+ it('should return unknown error for other failures', async () => {
+ vi.mocked(global.fetch).mockRejectedValue(new Error('Unknown error'));
+
+ const result = await testConnection('https://api.example.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'unknown',
+ message: 'Connection test failed. Please try again.'
+ });
+ });
+
+ it('should auto-prepend https:// if missing', async () => {
+ vi.mocked(global.fetch).mockResolvedValue({
+ ok: true,
+ status: 200,
+ json: async () => ({ data: [] })
+ } as Response);
+
+ await testConnection('api.anthropic.com', 'sk-test-key-12chars');
+
+ expect(global.fetch).toHaveBeenCalledWith(
+ 'https://api.anthropic.com/v1/models',
+ expect.any(Object)
+ );
+ });
+
+ it('should remove trailing slash from baseUrl', async () => {
+ vi.mocked(global.fetch).mockResolvedValue({
+ ok: true,
+ status: 200,
+ json: async () => ({ data: [] })
+ } as Response);
+
+ await testConnection('https://api.anthropic.com/', 'sk-test-key-12chars');
+
+ expect(global.fetch).toHaveBeenCalledWith(
+ 'https://api.anthropic.com/v1/models',
+ expect.any(Object)
+ );
+ });
+
+ it('should return error for empty baseUrl', async () => {
+ const result = await testConnection('', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'endpoint',
+ message: 'Invalid endpoint. Please check the Base URL.'
+ });
+ expect(global.fetch).not.toHaveBeenCalled();
+ });
+
+ it('should return error for invalid baseUrl format', async () => {
+ const result = await testConnection('ftp://invalid-protocol.com', 'sk-test-key-12chars');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'endpoint',
+ message: 'Invalid endpoint. Please check the Base URL.'
+ });
+ expect(global.fetch).not.toHaveBeenCalled();
+ });
+
+ it('should return error for invalid API key format', async () => {
+ const result = await testConnection('https://api.anthropic.com', 'short');
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'auth',
+ message: 'Authentication failed. Please check your API key.'
+ });
+ expect(global.fetch).not.toHaveBeenCalled();
+ });
+
+ it('should abort when signal is triggered', async () => {
+ const abortController = new AbortController();
+ const abortError = new Error('Aborted');
+ abortError.name = 'AbortError';
+
+ vi.mocked(global.fetch).mockRejectedValue(abortError);
+
+ // Abort immediately
+ abortController.abort();
+
+ const result = await testConnection('https://api.anthropic.com', 'sk-test-key-12chars', abortController.signal);
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'timeout',
+ message: 'Connection timeout. The endpoint did not respond.'
+ });
+ });
+
+ it('should set 10 second timeout', async () => {
+ vi.mocked(global.fetch).mockImplementation(() =>
+ new Promise((_, reject) => {
+ setTimeout(() => {
+ const abortError = new Error('Aborted');
+ abortError.name = 'AbortError';
+ reject(abortError);
+ }, 100); // Short delay for test
+ })
+ );
+
+ const startTime = Date.now();
+ const result = await testConnection('https://slow.example.com', 'sk-test-key-12chars');
+ const elapsed = Date.now() - startTime;
+
+ expect(result).toEqual({
+ success: false,
+ errorType: 'timeout',
+ message: 'Connection timeout. The endpoint did not respond.'
+ });
+ // Should timeout at 10 seconds, but we use a mock for faster test
+ expect(elapsed).toBeLessThan(5000); // Well under 10s due to mock
+ });
+ });
+});
diff --git a/apps/frontend/src/main/services/profile-service.ts b/apps/frontend/src/main/services/profile-service.ts
new file mode 100644
index 0000000000..a58651ac56
--- /dev/null
+++ b/apps/frontend/src/main/services/profile-service.ts
@@ -0,0 +1,510 @@
+/**
+ * Profile Service - Validation and profile creation
+ *
+ * Provides validation functions for URL, API key, and profile name uniqueness.
+ * Handles creating new profiles with validation.
+ */
+
+import { loadProfilesFile, saveProfilesFile, generateProfileId } from '../utils/profile-manager';
+import type { APIProfile, TestConnectionResult } from '../../shared/types/profile';
+
+/**
+ * Validate base URL format
+ * Accepts HTTP(S) URLs with valid endpoints
+ */
+export function validateBaseUrl(baseUrl: string): boolean {
+ if (!baseUrl || baseUrl.trim() === '') {
+ return false;
+ }
+
+ try {
+ const url = new URL(baseUrl);
+ // Only allow http and https protocols
+ return url.protocol === 'http:' || url.protocol === 'https:';
+ } catch {
+ return false;
+ }
+}
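+
+// Examples (mirroring the unit tests):
+//   validateBaseUrl('https://api.anthropic.com') -> true
+//   validateBaseUrl('http://localhost:8080')     -> true
+//   validateBaseUrl('ftp://example.com')         -> false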
+
+/**
+ * Validate API key format
+ * Accepts various API key formats (Anthropic, OpenAI, custom)
+ */
+export function validateApiKey(apiKey: string): boolean {
+ if (!apiKey || apiKey.trim() === '') {
+ return false;
+ }
+
+ const trimmed = apiKey.trim();
+
+ // Too short to be a real API key
+ if (trimmed.length < 12) {
+ return false;
+ }
+
+ // Accept common API key formats
+ // Anthropic: sk-ant-...
+ // OpenAI: sk-proj-... or sk-...
+ // Custom: any reasonable length key with alphanumeric chars
+ const hasValidChars = /^[a-zA-Z0-9\-_+.]+$/.test(trimmed);
+
+ return hasValidChars;
+}
+
+/**
+ * Validate that profile name is unique (case-insensitive, trimmed)
+ */
+export async function validateProfileNameUnique(name: string): Promise {
+ const trimmed = name.trim().toLowerCase();
+
+ const file = await loadProfilesFile();
+
+ // Check if any profile has the same name (case-insensitive)
+ const exists = file.profiles.some(
+ (p) => p.name.trim().toLowerCase() === trimmed
+ );
+
+ return !exists;
+}
+
+/**
+ * Input type for creating a profile (without id, createdAt, updatedAt)
+ */
+export type CreateProfileInput = Omit;
+
+/**
+ * Input type for updating a profile (with id, without createdAt, updatedAt)
+ */
+export type UpdateProfileInput = Pick & CreateProfileInput;
+
+/**
+ * Delete a profile with validation
+ * Throws errors for validation failures
+ */
+export async function deleteProfile(id: string): Promise